Example no. 1
unsigned long
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
			  const unsigned long len, const unsigned long pgoff,
			  const unsigned long flags)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	unsigned long addr = addr0, pax_task_size = TASK_SIZE;
	struct vm_unmapped_area_info info;
	unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);

#ifdef CONFIG_PAX_SEGMEXEC
	if (mm->pax_flags & MF_PAX_SEGMEXEC)
		pax_task_size = SEGMEXEC_TASK_SIZE;
#endif

	pax_task_size -= PAGE_SIZE;

	/* requested length too big for entire address space */
	if (len > pax_task_size)
		return -ENOMEM;

	if (flags & MAP_FIXED)
		return addr;

#ifdef CONFIG_PAX_PAGEEXEC
	if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE))
		goto bottomup;
#endif

#ifdef CONFIG_PAX_RANDMMAP
	if (!(mm->pax_flags & MF_PAX_RANDMMAP))
#endif

	/* requesting a specific address */
	if (addr) {
		addr = PAGE_ALIGN(addr);
		if (pax_task_size - len >= addr) {
			vma = find_vma(mm, addr);
			if (check_heap_stack_gap(vma, addr, len, offset))
				return addr;
		}
	}

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = PAGE_SIZE;
	info.high_limit = mm->mmap_base;
	info.align_mask = filp ? get_align_mask() : 0;
	info.align_offset = pgoff << PAGE_SHIFT;
	info.threadstack_offset = offset;

	addr = vm_unmapped_area(&info);
	if (!(addr & ~PAGE_MASK))
		return addr;
	VM_BUG_ON(addr != -ENOMEM);

bottomup:
	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	return arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
}
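Example no. 2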
BCE_ERROR BCAllocContigMemory(unsigned long ulSize,
                              BCE_HANDLE unref__ *phMemHandle,
                              IMG_CPU_VIRTADDR *pLinAddr,
                              IMG_CPU_PHYADDR *pPhysAddr)
{
#if defined(LMA)
	void *pvLinAddr;

	if(g_ulMemCurrent + ulSize >= PVR_BUFFERCLASS_MEMSIZE)
	{
		return (BCE_ERROR_OUT_OF_MEMORY);
	}

	pvLinAddr = ioremap(g_ulMemBase + g_ulMemCurrent, ulSize);

	if(pvLinAddr)
	{
		pPhysAddr->uiAddr = g_ulMemBase + g_ulMemCurrent;
		*pLinAddr = pvLinAddr;	

		
		g_ulMemCurrent += ulSize;
		return (BCE_OK);
	}
	return (BCE_ERROR_OUT_OF_MEMORY);
#else	
#if defined(BCE_USE_SET_MEMORY)
	void *pvLinAddr;
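	/* Round the request up to whole pages: set_memory_wc() below
	   changes memory attributes with page granularity */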
	unsigned long ulAlignedSize = PAGE_ALIGN(ulSize);
	int iPages = (int)(ulAlignedSize >> PAGE_SHIFT);
	int iError;

	pvLinAddr = kmalloc(ulAlignedSize, GFP_KERNEL);
	if(pvLinAddr == NULL)
	{
		return (BCE_ERROR_OUT_OF_MEMORY);
	}
	BUG_ON(((unsigned long)pvLinAddr) & ~PAGE_MASK);

	iError = set_memory_wc((unsigned long)pvLinAddr, iPages);
	if (iError != 0)
	{
		printk(KERN_ERR DRVNAME ": BCAllocContigMemory:  set_memory_wc failed (%d)\n", iError);
		return (BCE_ERROR_OUT_OF_MEMORY);
	}

	pPhysAddr->uiAddr = virt_to_phys(pvLinAddr);
	*pLinAddr = pvLinAddr;

	return (BCE_OK);
#else	
	dma_addr_t dma;
	void *pvLinAddr;

	pvLinAddr = dma_alloc_coherent(NULL, ulSize, &dma, GFP_KERNEL);
	if (pvLinAddr == NULL)
	{
		return (BCE_ERROR_OUT_OF_MEMORY);
	}

	pPhysAddr->uiAddr = dma;
	*pLinAddr = pvLinAddr;

	return (BCE_OK);
#endif	
#endif	
}
Example no. 3
static int __devinit cg14_init_one(struct of_device *op)
{
	struct device_node *dp = op->node;
	struct all_info *all;
	int is_8mb, linebytes, i, err;

	all = kzalloc(sizeof(*all), GFP_KERNEL);
	if (!all)
		return -ENOMEM;

	spin_lock_init(&all->par.lock);

	sbusfb_fill_var(&all->info.var, dp->node, 8);
	all->info.var.red.length = 8;
	all->info.var.green.length = 8;
	all->info.var.blue.length = 8;

	linebytes = of_getintprop_default(dp, "linebytes",
					  all->info.var.xres);
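	/* Round the framebuffer size up to a whole page for the of_ioremap()
	 * of the "cg14 ram" region below.
	 */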
	all->par.fbsize = PAGE_ALIGN(linebytes * all->info.var.yres);

	if (!strcmp(dp->parent->name, "sbus") ||
	    !strcmp(dp->parent->name, "sbi")) {
		all->par.physbase = op->resource[0].start;
		all->par.iospace = op->resource[0].flags & IORESOURCE_BITS;
	} else {
		all->par.physbase = op->resource[1].start;
		all->par.iospace = op->resource[0].flags & IORESOURCE_BITS;
	}

	all->par.regs = of_ioremap(&op->resource[0], 0,
				   sizeof(struct cg14_regs), "cg14 regs");
	all->par.clut = of_ioremap(&op->resource[0], CG14_CLUT1,
				   sizeof(struct cg14_clut), "cg14 clut");
	all->par.cursor = of_ioremap(&op->resource[0], CG14_CURSORREGS,
				   sizeof(struct cg14_cursor), "cg14 cursor");

	all->info.screen_base = of_ioremap(&op->resource[1], 0,
					   all->par.fbsize, "cg14 ram");

	if (!all->par.regs || !all->par.clut || !all->par.cursor ||
	    !all->info.screen_base) {
		cg14_unmap_regs(all);
		kfree(all);
		return -ENOMEM;
	}

	is_8mb = (((op->resource[1].end - op->resource[1].start) + 1) ==
		  (8 * 1024 * 1024));

	BUILD_BUG_ON(sizeof(all->par.mmap_map) != sizeof(__cg14_mmap_map));
		
	memcpy(&all->par.mmap_map, &__cg14_mmap_map,
	       sizeof(all->par.mmap_map));

	for (i = 0; i < CG14_MMAP_ENTRIES; i++) {
		struct sbus_mmap_map *map = &all->par.mmap_map[i];

		if (!map->size)
			break;
		if (map->poff & 0x80000000)
			map->poff = (map->poff & 0x7fffffff) +
				(op->resource[0].start -
				 op->resource[1].start);
		if (is_8mb &&
		    map->size >= 0x100000 &&
		    map->size <= 0x400000)
			map->size *= 2;
	}

	all->par.mode = MDI_8_PIX;
	all->par.ramsize = (is_8mb ? 0x800000 : 0x400000);

	all->info.flags = FBINFO_DEFAULT | FBINFO_HWACCEL_YPAN;
	all->info.fbops = &cg14_ops;
	all->info.par = &all->par;

	__cg14_reset(&all->par);

	if (fb_alloc_cmap(&all->info.cmap, 256, 0)) {
		cg14_unmap_regs(all);
		kfree(all);
		return -ENOMEM;
	}
	fb_set_cmap(&all->info.cmap, &all->info);

	cg14_init_fix(&all->info, linebytes, dp);

	err = register_framebuffer(&all->info);
	if (err < 0) {
		fb_dealloc_cmap(&all->info.cmap);
		cg14_unmap_regs(all);
		kfree(all);
		return err;
	}

	dev_set_drvdata(&op->dev, all);

	printk("%s: cgfourteen at %lx:%lx, %dMB\n",
	       dp->full_name,
	       all->par.iospace, all->par.physbase,
	       all->par.ramsize >> 20);

	return 0;
}
Example no. 4
int q6usm_us_param_buf_alloc(unsigned int dir,
			struct us_client *usc,
			unsigned int bufsz)
{
	int rc = 0;
	struct us_port_data *port = NULL;
	unsigned int size = bufsz;
	size_t len;

	if ((usc == NULL) ||
		((dir != IN) && (dir != OUT)) ||
		(usc->session <= 0 || usc->session > USM_SESSION_MAX)) {
		pr_err("%s: wrong parameters: direction=%d, bufsz=%d\n",
			__func__, dir, bufsz);
		return -EINVAL;
	}

	mutex_lock(&usc->cmd_lock);

	port = &usc->port[dir];

	if (bufsz == 0) {
		pr_debug("%s: bufsz=0, get/set param commands are forbidden\n",
			__func__);
		port->param_buf = NULL;
		mutex_unlock(&usc->cmd_lock);
		return rc;
	}

	/* The size to allocate should be multiple of 4K bytes */
	size = PAGE_ALIGN(size);

	rc = msm_audio_ion_alloc("ultrasound_client",
		&port->param_client, &port->param_handle,
		size, &port->param_phys,
		&len, &port->param_buf);

	if (rc) {
		pr_err("%s: US ION allocation failed, rc = %d\n",
			__func__, rc);
		mutex_unlock(&usc->cmd_lock);
		return -ENOMEM;
	}

	port->param_buf_size = bufsz;
	pr_debug("%s: param_buf[%p]; param_phys[%llx]; [%p]\n", __func__,
		 (void *)port->param_buf,
		 (u64)port->param_phys,
		 (void *)&port->param_phys);

	rc = q6usm_memory_map(port->param_phys, (IN | OUT), size, 1,
			usc->session, (uint32_t *)port->param_buf_mem_handle);
	if (rc < 0) {
		pr_err("%s: CMD Memory_map failed\n", __func__);
		mutex_unlock(&usc->cmd_lock);
		q6usm_us_client_buf_free(dir, usc);
		q6usm_us_param_buf_free(dir, usc);
	} else {
		mutex_unlock(&usc->cmd_lock);
		rc = 0;
	}

	return rc;
}
Example no. 5
ULONG
MiDecommitPages (
    IN PVOID StartingAddress,
    IN PMMPTE EndingPte,
    IN PEPROCESS Process,
    IN PMMVAD_SHORT Vad
)

/*++

Routine Description:

    This routine decommits the specified range of pages.

Arguments:

    StartingAddress - Supplies the starting address of the range.

    EndingPte - Supplies the ending PTE of the range.

    Process - Supplies the current process.

    Vad - Supplies the virtual address descriptor which describes the range.

Return Value:

    Value to reduce commitment by for the VAD.

Environment:

    Kernel mode, APCs disabled, AddressCreation mutex held.

--*/

{
    PMMPTE PointerPde;
    PMMPTE PointerPte;
    PVOID Va;
    ULONG CommitReduction;
    PMMPTE CommitLimitPte;
    KIRQL OldIrql;
    PMMPTE ValidPteList[MM_VALID_PTE_SIZE];
    ULONG count;
    WSLE_NUMBER WorkingSetIndex;
    PMMPFN Pfn1;
    PMMPFN Pfn2;
    WSLE_NUMBER Entry;
    MMWSLENTRY Locked;
    MMPTE PteContents;
    PFN_NUMBER PageTableFrameIndex;
    PVOID UsedPageTableHandle;
    PETHREAD CurrentThread;

    count = 0;
    CommitReduction = 0;

    if (Vad->u.VadFlags.MemCommit) {
        CommitLimitPte = MiGetPteAddress (MI_VPN_TO_VA (Vad->EndingVpn));
    }
    else {
        CommitLimitPte = NULL;
    }

    //
    // Decommit each page by setting the PTE to be explicitly
    // decommitted.  The PTEs cannot be deleted all at once as
    // this would set the PTEs to zero which would auto-evaluate
    // as committed if referenced by another thread when a page
    // table page is being in-paged.
    //

    PointerPde = MiGetPdeAddress (StartingAddress);
    PointerPte = MiGetPteAddress (StartingAddress);
    Va = StartingAddress;

    //
    // Loop through all the PDEs which map this region and ensure that
    // they exist.  If they don't exist create them by touching a
    // PTE mapped by the PDE.
    //

    CurrentThread = PsGetCurrentThread ();

    LOCK_WS_UNSAFE (CurrentThread, Process);

    MiMakePdeExistAndMakeValid (PointerPde, Process, MM_NOIRQL);

    while (PointerPte <= EndingPte) {

        if (MiIsPteOnPdeBoundary (PointerPte)) {

            PointerPde = MiGetPdeAddress (Va);
            if (count != 0) {
                MiProcessValidPteList (&ValidPteList[0], count);
                count = 0;
            }

            MiMakePdeExistAndMakeValid (PointerPde, Process, MM_NOIRQL);
        }

        //
        // The working set lock is held.  No PTEs can go from
        // invalid to valid or valid to invalid.  Transition
        // PTEs can go from transition to pagefile.
        //

        PteContents = *PointerPte;

        if (PteContents.u.Long != 0) {

            if (PointerPte->u.Long == MmDecommittedPte.u.Long) {

                //
                // This PTE is already decommitted.
                //

                CommitReduction += 1;
            }
            else {

                Process->NumberOfPrivatePages -= 1;

                if (PteContents.u.Hard.Valid == 1) {

                    //
                    // Make sure this is not a forked PTE.
                    //

                    Pfn1 = MI_PFN_ELEMENT (PteContents.u.Hard.PageFrameNumber);

                    if (Pfn1->u3.e1.PrototypePte) {

                        //
                        // MiDeletePte may release both the working set pushlock
                        // and the PFN lock so the valid PTE list must be
                        // processed now.
                        //

                        if (count != 0) {
                            MiProcessValidPteList (&ValidPteList[0], count);
                            count = 0;
                        }

                        LOCK_PFN (OldIrql);

                        MiDeletePte (PointerPte,
                                     Va,
                                     FALSE,
                                     Process,
                                     NULL,
                                     NULL,
                                     OldIrql);

                        UNLOCK_PFN (OldIrql);

                        Process->NumberOfPrivatePages += 1;
                        MI_WRITE_INVALID_PTE (PointerPte, MmDecommittedPte);
                    }
                    else {

                        //
                        // PTE is valid, process later when PFN lock is held.
                        //

                        if (count == MM_VALID_PTE_SIZE) {
                            MiProcessValidPteList (&ValidPteList[0], count);
                            count = 0;
                        }
                        ValidPteList[count] = PointerPte;
                        count += 1;

                        //
                        // Remove address from working set list.
                        //

                        WorkingSetIndex = Pfn1->u1.WsIndex;

                        ASSERT (PAGE_ALIGN(MmWsle[WorkingSetIndex].u1.Long) ==
                                Va);
                        //
                        // Check to see if this entry is locked in the
                        // working set or locked in memory.
                        //

                        Locked = MmWsle[WorkingSetIndex].u1.e1;

                        MiRemoveWsle (WorkingSetIndex, MmWorkingSetList);

                        //
                        // Add this entry to the list of free working set
                        // entries and adjust the working set count.
                        //

                        MiReleaseWsle (WorkingSetIndex, &Process->Vm);

                        if ((Locked.LockedInWs == 1) || (Locked.LockedInMemory == 1)) {

                            //
                            // This entry is locked.
                            //

                            MmWorkingSetList->FirstDynamic -= 1;

                            if (WorkingSetIndex != MmWorkingSetList->FirstDynamic) {
                                Entry = MmWorkingSetList->FirstDynamic;
                                ASSERT (MmWsle[Entry].u1.e1.Valid);

                                MiSwapWslEntries (Entry,
                                                  WorkingSetIndex,
                                                  &Process->Vm,
                                                  FALSE);
                            }
                        }
                        MI_SET_PTE_IN_WORKING_SET (PointerPte, 0);
                    }
                }
                else if (PteContents.u.Soft.Prototype) {

                    //
                    // This is a forked PTE, just delete it.
                    //
                    // MiDeletePte may release both the working set pushlock
                    // and the PFN lock so the valid PTE list must be
                    // processed now.
                    //

                    if (count != 0) {
                        MiProcessValidPteList (&ValidPteList[0], count);
                        count = 0;
                    }

                    LOCK_PFN (OldIrql);

                    MiDeletePte (PointerPte,
                                 Va,
                                 FALSE,
                                 Process,
                                 NULL,
                                 NULL,
                                 OldIrql);

                    UNLOCK_PFN (OldIrql);

                    Process->NumberOfPrivatePages += 1;
                    MI_WRITE_INVALID_PTE (PointerPte, MmDecommittedPte);
                }
                else if (PteContents.u.Soft.Transition == 1) {

                    //
                    // Transition PTE, get the PFN database lock
                    // and reprocess this one.
                    //

                    LOCK_PFN (OldIrql);
                    PteContents = *PointerPte;

                    if (PteContents.u.Soft.Transition == 1) {

                        //
                        // PTE is still in transition, delete it.
                        //

                        Pfn1 = MI_PFN_ELEMENT (PteContents.u.Trans.PageFrameNumber);

                        MI_SET_PFN_DELETED (Pfn1);

                        PageTableFrameIndex = Pfn1->u4.PteFrame;
                        Pfn2 = MI_PFN_ELEMENT (PageTableFrameIndex);

                        MiDecrementShareCountInline (Pfn2, PageTableFrameIndex);

                        //
                        // Check the reference count for the page, if the
                        // reference count is zero, move the page to the
                        // free list, if the reference count is not zero,
                        // ignore this page.  When the reference count
                        // goes to zero, it will be placed on the free list.
                        //

                        if (Pfn1->u3.e2.ReferenceCount == 0) {
                            MiUnlinkPageFromList (Pfn1);
                            MiReleasePageFileSpace (Pfn1->OriginalPte);
                            MiInsertPageInFreeList (MI_GET_PAGE_FRAME_FROM_TRANSITION_PTE(&PteContents));
                        }

                    }
                    else {

                        //
                        // Page MUST be in page file format!
                        //

                        ASSERT (PteContents.u.Soft.Valid == 0);
                        ASSERT (PteContents.u.Soft.Prototype == 0);
                        ASSERT (PteContents.u.Soft.PageFileHigh != 0);
                        MiReleasePageFileSpace (PteContents);
                    }
                    MI_WRITE_INVALID_PTE (PointerPte, MmDecommittedPte);
                    UNLOCK_PFN (OldIrql);
                }
                else {

                    //
                    // Must be demand zero or paging file format.
                    //

                    if (PteContents.u.Soft.PageFileHigh != 0) {
                        LOCK_PFN (OldIrql);
                        MiReleasePageFileSpace (PteContents);
                        UNLOCK_PFN (OldIrql);
                    }
                    else {

                        //
                        // Don't subtract out the private page count for
                        // a demand zero page.
                        //

                        Process->NumberOfPrivatePages += 1;
                    }

                    MI_WRITE_INVALID_PTE (PointerPte, MmDecommittedPte);
                }
            }
        }
        else {

            //
            // The PTE is already zero.
            //

            //
            // Increment the count of non-zero page table entries for this
            // page table and the number of private pages for the process.
            //

            UsedPageTableHandle = MI_GET_USED_PTES_HANDLE (Va);

            MI_INCREMENT_USED_PTES_BY_HANDLE (UsedPageTableHandle);

            if (PointerPte > CommitLimitPte) {

                //
                // PTE is not committed.
                //

                CommitReduction += 1;
            }
            MI_WRITE_INVALID_PTE (PointerPte, MmDecommittedPte);
        }

        PointerPte += 1;
        Va = (PVOID)((PCHAR)Va + PAGE_SIZE);
    }
    if (count != 0) {
        MiProcessValidPteList (&ValidPteList[0], count);
    }

    UNLOCK_WS_UNSAFE (CurrentThread, Process);

    return CommitReduction;
}
Example no. 6
File: mixart.c Project: 274914765/C
/*
 *    probe function - creates the card manager
 */
static int __devinit snd_mixart_probe(struct pci_dev *pci,
                      const struct pci_device_id *pci_id)
{
    static int dev;
    struct mixart_mgr *mgr;
    unsigned int i;
    int err;
    size_t size;

    /*
     */
    if (dev >= SNDRV_CARDS)
        return -ENODEV;
    if (! enable[dev]) {
        dev++;
        return -ENOENT;
    }

    /* enable PCI device */
    if ((err = pci_enable_device(pci)) < 0)
        return err;
    pci_set_master(pci);

    /* check if we can restrict PCI DMA transfers to 32 bits */
    if (pci_set_dma_mask(pci, DMA_32BIT_MASK) < 0) {
        snd_printk(KERN_ERR "architecture does not support 32bit PCI busmaster DMA\n");
        pci_disable_device(pci);
        return -ENXIO;
    }

    /*
     */
    mgr = kzalloc(sizeof(*mgr), GFP_KERNEL);
    if (! mgr) {
        pci_disable_device(pci);
        return -ENOMEM;
    }

    mgr->pci = pci;
    mgr->irq = -1;

    /* resource assignment */
    if ((err = pci_request_regions(pci, CARD_NAME)) < 0) {
        kfree(mgr);
        pci_disable_device(pci);
        return err;
    }
    for (i = 0; i < 2; i++) {
        mgr->mem[i].phys = pci_resource_start(pci, i);
        mgr->mem[i].virt = ioremap_nocache(mgr->mem[i].phys,
                           pci_resource_len(pci, i));
        if (!mgr->mem[i].virt) {
                printk(KERN_ERR "unable to remap resource 0x%lx\n",
                   mgr->mem[i].phys);
            snd_mixart_free(mgr);
            return -EBUSY;
        }
    }

    if (request_irq(pci->irq, snd_mixart_interrupt, IRQF_SHARED,
            CARD_NAME, mgr)) {
        snd_printk(KERN_ERR "unable to grab IRQ %d\n", pci->irq);
        snd_mixart_free(mgr);
        return -EBUSY;
    }
    mgr->irq = pci->irq;

    sprintf(mgr->shortname, "Digigram miXart");
    sprintf(mgr->longname, "%s at 0x%lx & 0x%lx, irq %i", mgr->shortname, mgr->mem[0].phys, mgr->mem[1].phys, mgr->irq);

    /* ISR spinlock  */
    spin_lock_init(&mgr->lock);

    /* init mailbox  */
    mgr->msg_fifo_readptr = 0;
    mgr->msg_fifo_writeptr = 0;

    spin_lock_init(&mgr->msg_lock);
    mutex_init(&mgr->msg_mutex);
    init_waitqueue_head(&mgr->msg_sleep);
    atomic_set(&mgr->msg_processed, 0);

    /* init setup mutex*/
    mutex_init(&mgr->setup_mutex);

    /* init message tasklet */
    tasklet_init(&mgr->msg_taskq, snd_mixart_msg_tasklet, (unsigned long) mgr);

    /* card assignment */
    mgr->num_cards = MIXART_MAX_CARDS; /* 4  FIXME: configurable? */
    for (i = 0; i < mgr->num_cards; i++) {
        struct snd_card *card;
        char tmpid[16];
        int idx;

        if (index[dev] < 0)
            idx = index[dev];
        else
            idx = index[dev] + i;
        snprintf(tmpid, sizeof(tmpid), "%s-%d", id[dev] ? id[dev] : "MIXART", i);
        card = snd_card_new(idx, tmpid, THIS_MODULE, 0);

        if (! card) {
            snd_printk(KERN_ERR "cannot allocate the card %d\n", i);
            snd_mixart_free(mgr);
            return -ENOMEM;
        }

        strcpy(card->driver, CARD_NAME);
        sprintf(card->shortname, "%s [PCM #%d]", mgr->shortname, i);
        sprintf(card->longname, "%s [PCM #%d]", mgr->longname, i);

        if ((err = snd_mixart_create(mgr, card, i)) < 0) {
            snd_mixart_free(mgr);
            return err;
        }

        if(i==0) {
            /* init proc interface only for chip0 */
            snd_mixart_proc_init(mgr->chip[i]);
        }

        if ((err = snd_card_register(card)) < 0) {
            snd_mixart_free(mgr);
            return err;
        }
    }

    /* init firmware status (mgr->dsp_loaded reset in hwdep_new) */
    mgr->board_type = MIXART_DAUGHTER_TYPE_NONE;

    /* create array of streaminfo */
    size = PAGE_ALIGN( (MIXART_MAX_STREAM_PER_CARD * MIXART_MAX_CARDS *
                sizeof(struct mixart_flowinfo)) );
    if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(pci),
                size, &mgr->flowinfo) < 0) {
        snd_mixart_free(mgr);
        return -ENOMEM;
    }
    /* init streaminfo_array */
    memset(mgr->flowinfo.area, 0, size);

    /* create array of bufferinfo */
    size = PAGE_ALIGN( (MIXART_MAX_STREAM_PER_CARD * MIXART_MAX_CARDS *
                sizeof(struct mixart_bufferinfo)) );
    if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(pci),
                size, &mgr->bufferinfo) < 0) {
        snd_mixart_free(mgr);
        return -ENOMEM;
    }
    /* init bufferinfo_array */
    memset(mgr->bufferinfo.area, 0, size);

    /* set up firmware */
    err = snd_mixart_setup_firmware(mgr);
    if (err < 0) {
        snd_mixart_free(mgr);
        return err;
    }

    pci_set_drvdata(pci, mgr);
    dev++;
    return 0;
}
Example no. 7
void *mmorecore(struct mdesc *mdp, ssize_t size)
{
  ssize_t test = 0;
  void *result; // please keep it uninitialized to track issues
  off_t foffset;                /* File offset at which new mapping will start */
  size_t mapbytes;              /* Number of bytes to map */
  void *moveto;                 /* Address where we wish to move "break value" to */
  void *mapto;                  /* Address we actually mapped to */
  char buf = 0;                 /* Single byte to write to extend mapped file */

//  fprintf(stderr,"increase %p by %u\n",mdp,size);
  if (pagesize == 0)
    pagesize = getpagesize();

  if (size == 0) {
    /* Just return the current "break" value. */
    result = mdp->breakval;

  } else if (size < 0) {
    /* We are deallocating memory.  If the amount requested would cause
       us to try to deallocate back past the base of the mmap'd region
       then die verbosely.  Otherwise, deallocate the memory and return
       the old break value. */
    if (((char *) mdp->breakval) + size >= (char *) mdp->base) {
      result = (void *) mdp->breakval;
      mdp->breakval = (char *) mdp->breakval + size;
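      /* Round the new break up to a page boundary and unmap the whole pages above it */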
      moveto = PAGE_ALIGN(mdp->breakval);
      munmap(moveto,
             (size_t) (((char *) mdp->top) - ((char *) moveto)) - 1);
      mdp->top = moveto;
    } else {
      fprintf(stderr,"Internal error: mmap was asked to deallocate more memory than it previously allocated. Bailling out now!\n");
      abort();
    }
  } else {
    /* We are allocating memory. Make sure we have an open file
       descriptor if not working with anonymous memory. */
    if (!(mdp->flags & MMALLOC_ANONYMOUS) && mdp->fd < 0) {
      fprintf(stderr,"Internal error: mmap file descriptor <0 (%d), without MMALLOC_ANONYMOUS being in the flags.\n",mdp->fd);
      abort();
    } else if ((char *) mdp->breakval + size > (char *) mdp->top) {
      /* The request would move us past the end of the currently
         mapped memory, so map in enough more memory to satisfy
         the request.  This means we also have to grow the mapped-to
         file by an appropriate amount, since mmap cannot be used
         to extend a file. */
      moveto = PAGE_ALIGN((char *) mdp->breakval + size);
      mapbytes = (char *) moveto - (char *) mdp->top;
      foffset = (char *) mdp->top - (char *) mdp->base;

      if (mdp->fd > 0) {
        /* FIXME:  Test results of lseek() */
        lseek(mdp->fd, foffset + mapbytes - 1, SEEK_SET);
        test = write(mdp->fd, &buf, 1);
        if (test == -1) {
          fprintf(stderr,"Internal error: write to mmap'ed fd failed! error: %s", strerror(errno));
          abort();
        }
      }

      /* Let's call mmap. Note that it is possible that mdp->top
         is 0. In this case mmap will choose the address for us */
      mapto = mmap(mdp->top, mapbytes, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE_OR_SHARED(mdp) | MAP_IS_ANONYMOUS(mdp) |
                   MAP_FIXED, MAP_ANON_OR_FD(mdp), foffset);

      if (mapto == (void *) -1/* That's MAP_FAILED */) {
        char buff[1024];
        fprintf(stderr,"Internal error: mmap returned MAP_FAILED! error: %s\n",strerror(errno));
        sprintf(buff,"cat /proc/%d/maps",getpid());
        int status = system(buff);
        if (status == -1 || !(WIFEXITED(status) && WEXITSTATUS(status) == 0))
          fprintf(stderr, "Something went wrong when trying to %s\n", buff);
        sleep(1);
        abort();
      }

      if (mdp->top == 0)
        mdp->base = mdp->breakval = mapto;

      mdp->top = PAGE_ALIGN((char *) mdp->breakval + size);
      result = (void *) mdp->breakval;
      mdp->breakval = (char *) mdp->breakval + size;
    } else {
      result = (void *) mdp->breakval;
      mdp->breakval = (char *) mdp->breakval + size;
    }
  }
  return (result);
}
Example no. 8
void __init s5p_cma_region_reserve(struct cma_region *regions_normal,
				      struct cma_region *regions_secure,
				      size_t align_secure, const char *map)
{
	struct cma_region *reg;
	phys_addr_t paddr_last = 0xFFFFFFFF;

	for (reg = regions_normal; reg->size != 0; reg++) {
		phys_addr_t paddr;

		if (!IS_ALIGNED(reg->size, PAGE_SIZE)) {
			pr_debug("S5P/CMA: size of '%s' is NOT page-aligned\n",
								reg->name);
			reg->size = PAGE_ALIGN(reg->size);
		}


		if (reg->reserved) {
			pr_err("S5P/CMA: '%s' already reserved\n", reg->name);
			continue;
		}

		if (reg->alignment) {
			if ((reg->alignment & ~PAGE_MASK) ||
				(reg->alignment & (reg->alignment - 1))) {
				pr_err("S5P/CMA: Failed to reserve '%s': "
						"incorrect alignment 0x%08x.\n",
						reg->name, reg->alignment);
				continue;
			}
		} else {
			reg->alignment = PAGE_SIZE;
		}

		if (reg->start) {
			if (!memblock_is_region_reserved(reg->start, reg->size)
			    && (memblock_reserve(reg->start, reg->size) == 0))
				reg->reserved = 1;
			else {
				pr_err("S5P/CMA: Failed to reserve '%s'\n",
				       reg->name);
				continue;
			}

			pr_debug("S5P/CMA: "
				 "Reserved 0x%08x/0x%08x for '%s'\n",
				 reg->start, reg->size, reg->name);
			paddr = reg->start;
		} else {
			paddr = memblock_find_in_range(0,
					MEMBLOCK_ALLOC_ACCESSIBLE,
					reg->size, reg->alignment);
		}

		if (paddr) {
			if (memblock_reserve(paddr, reg->size)) {
				pr_err("S5P/CMA: Failed to reserve '%s'\n",
								reg->name);
				continue;
			}

			reg->start = paddr;
			reg->reserved = 1;

			pr_info("S5P/CMA: Reserved 0x%08x/0x%08x for '%s'\n",
						reg->start, reg->size, reg->name);
		} else {
			pr_err("S5P/CMA: No free space in memory for '%s'\n",
								reg->name);
		}

		if (cma_early_region_register(reg)) {
			pr_err("S5P/CMA: Failed to register '%s'\n",
								reg->name);
			memblock_free(reg->start, reg->size);
		} else {
			paddr_last = min(paddr, paddr_last);
		}
	}

	if (align_secure & (align_secure - 1)) {
		pr_err("S5P/CMA: "
			"Wrong alignment requirement for secure region.\n");
	} else if (regions_secure && regions_secure->size) {
		size_t size_secure = 0;

		for (reg = regions_secure; reg->size != 0; reg++)
			size_secure += reg->size;

		reg--;

		/* Entire secure regions will be merged into 2
		 * consecutive regions. */
		if (align_secure == 0) {
			size_t size_region2;
			size_t order_region2;
			size_t aug_size;

			align_secure = 1 <<
				(get_order((size_secure + 1) / 2) + PAGE_SHIFT);
			/* Calculation of a subregion size */
			size_region2 = size_secure - align_secure;
			order_region2 = get_order(size_region2) + PAGE_SHIFT;
			if (order_region2 < 20)
				order_region2 = 20; /* 1MB */
			order_region2 -= 3; /* divide by 8 */
			size_region2 = ALIGN(size_region2, 1 << order_region2);

			aug_size = align_secure + size_region2 - size_secure;
			if (aug_size > 0) {
				reg->size += aug_size;
				size_secure += aug_size;
				pr_debug("S5P/CMA: "
					"Augmented size of '%s' by %#x B.\n",
					reg->name, aug_size);
			}
		} else
			size_secure = ALIGN(size_secure, align_secure);

		pr_info("S5P/CMA: "
			"Reserving %#x for secure region aligned by %#x.\n",
						size_secure, align_secure);

		if (paddr_last >= memblock.current_limit) {
			paddr_last = memblock_find_in_range(0,
					MEMBLOCK_ALLOC_ACCESSIBLE,
					size_secure, reg->alignment);
		} else {
			paddr_last -= size_secure;
			paddr_last = round_down(paddr_last, align_secure);
		}

		if (paddr_last) {
#ifndef CONFIG_DMA_CMA
			while (memblock_reserve(paddr_last, size_secure))
				paddr_last -= align_secure;
#else
			if (!reg->start) {
				while (memblock_reserve(paddr_last,
							size_secure))
					paddr_last -= align_secure;
			}
#endif

			do {
#ifndef CONFIG_DMA_CMA
				reg->start = paddr_last;
				reg->reserved = 1;
				paddr_last += reg->size;
#else
				if (reg->start) {
					reg->reserved = 1;
#ifdef CONFIG_USE_MFC_CMA
#if defined(CONFIG_MACH_M0)
					if (reg->start == 0x5C100000) {
						if (memblock_reserve(0x5C100000,
								0x700000))
							panic("memblock\n");
						if (memblock_reserve(0x5F000000,
								0x200000))
							panic("memblock\n");
					} else
#elif defined(CONFIG_MACH_GC1)
					if (reg->start == 0x50400000) {
						if (memblock_reserve(0x50400000,
								0x400000))
							panic("memblock\n");
						if (memblock_reserve(0x53000000,
								0x500000))
							panic("memblock\n");
					} else
#endif
					{
						if (memblock_reserve(reg->start,
								reg->size))
							panic("memblock\n");
					}
#else
					if (memblock_reserve(reg->start,
								reg->size))
						panic("memblock\n");
#endif
				} else {
					reg->start = paddr_last;
					reg->reserved = 1;
					paddr_last += reg->size;
				}
#endif
				pr_info("S5P/CMA: "
					"Reserved 0x%08x/0x%08x for '%s'\n",
					reg->start, reg->size, reg->name);
				if (cma_early_region_register(reg)) {
					memblock_free(reg->start, reg->size);
					pr_err("S5P/CMA: "
					"Failed to register secure region "
					"'%s'\n", reg->name);
				} else {
					size_secure -= reg->size;
				}
			} while (reg-- != regions_secure);

			if (size_secure > 0)
				memblock_free(paddr_last, size_secure);
		} else {
			pr_err("S5P/CMA: Failed to reserve secure regions\n");
		}
	}

	if (map)
		cma_set_defaults(NULL, map);
}
Example no. 9
#ifndef TRUE
#define TRUE			1
#endif
#ifndef FALSE
#define FALSE			0
#endif

#if defined(CONFIG_VIDEO_TVP514X) || defined(CONFIG_VIDEO_TVP514X_MODULE)
#if defined(CONFIG_VIDEO_OMAP3) || defined(CONFIG_VIDEO_OMAP3_MODULE)
static struct omap34xxcam_hw_config decoder_hwc = {
	.dev_index		= 0,
	.dev_minor		= 0,
	.dev_type		= OMAP34XXCAM_SLAVE_SENSOR,
	.u.sensor.sensor_isp	= 1,
	.u.sensor.capture_mem	= PAGE_ALIGN(720*525*2*4),
};

static struct isp_interface_config tvp5146_if_config = {
	.ccdc_par_ser		= ISP_PARLL_YUV_BT,
	.dataline_shift		= 0x1,
	.hsvs_syncdetect	= ISPCTRL_SYNC_DETECT_VSRISE,
	.strobe			= 0x0,
	.prestrobe		= 0x0,
	.shutter		= 0x0,
	.wait_hs_vs		= 2,
	.u.par.par_bridge	= 0x0,
	.u.par.par_clk_pol	= 0x0,
};
#endif
Example no. 10
int vchiq_platform_init(struct platform_device *pdev, VCHIQ_STATE_T *state)
{
	struct device *dev = &pdev->dev;
	struct rpi_firmware *fw = platform_get_drvdata(pdev);
	VCHIQ_SLOT_ZERO_T *vchiq_slot_zero;
	struct resource *res;
	void *slot_mem;
	dma_addr_t slot_phys;
	u32 channelbase;
	int slot_mem_size, frag_mem_size;
	int err, irq, i;

	g_virt_to_bus_offset = virt_to_dma(dev, (void *)0);

	(void)of_property_read_u32(dev->of_node, "cache-line-size",
				   &g_cache_line_size);
	g_fragments_size = 2 * g_cache_line_size;

	/* Allocate space for the channels in coherent memory */
	slot_mem_size = PAGE_ALIGN(TOTAL_SLOTS * VCHIQ_SLOT_SIZE);
	frag_mem_size = PAGE_ALIGN(g_fragments_size * MAX_FRAGMENTS);

	slot_mem = dmam_alloc_coherent(dev, slot_mem_size + frag_mem_size,
				       &slot_phys, GFP_KERNEL);
	if (!slot_mem) {
		dev_err(dev, "could not allocate DMA memory\n");
		return -ENOMEM;
	}

	WARN_ON(((int)slot_mem & (PAGE_SIZE - 1)) != 0);

	vchiq_slot_zero = vchiq_init_slots(slot_mem, slot_mem_size);
	if (!vchiq_slot_zero)
		return -EINVAL;

	vchiq_slot_zero->platform_data[VCHIQ_PLATFORM_FRAGMENTS_OFFSET_IDX] =
		(int)slot_phys + slot_mem_size;
	vchiq_slot_zero->platform_data[VCHIQ_PLATFORM_FRAGMENTS_COUNT_IDX] =
		MAX_FRAGMENTS;

	g_fragments_base = (char *)slot_mem + slot_mem_size;
	slot_mem_size += frag_mem_size;

	g_free_fragments = g_fragments_base;
	for (i = 0; i < (MAX_FRAGMENTS - 1); i++) {
		*(char **)&g_fragments_base[i*g_fragments_size] =
			&g_fragments_base[(i + 1)*g_fragments_size];
	}
	*(char **)&g_fragments_base[i * g_fragments_size] = NULL;
	sema_init(&g_free_fragments_sema, MAX_FRAGMENTS);

	if (vchiq_init_state(state, vchiq_slot_zero, 0) != VCHIQ_SUCCESS)
		return -EINVAL;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	g_regs = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(g_regs))
		return PTR_ERR(g_regs);

	irq = platform_get_irq(pdev, 0);
	if (irq <= 0) {
		dev_err(dev, "failed to get IRQ\n");
		return irq;
	}

	err = devm_request_irq(dev, irq, vchiq_doorbell_irq, IRQF_IRQPOLL,
			       "VCHIQ doorbell", state);
	if (err) {
		dev_err(dev, "failed to register irq=%d\n", irq);
		return err;
	}

	/* Send the base address of the slots to VideoCore */
	channelbase = slot_phys;
	err = rpi_firmware_property(fw, RPI_FIRMWARE_VCHIQ_INIT,
				    &channelbase, sizeof(channelbase));
	if (err || channelbase) {
		dev_err(dev, "failed to set channelbase\n");
		return err ? : -ENXIO;
	}
Example no. 11
unsigned long
load_kernel(unsigned long load_addr, int num_words, unsigned long cksum,
		bd_t *bp)
{
#ifdef INTERACTIVE_CONSOLE
	int timer = 0;
	char ch;
#endif
	char *cp;
	int zimage_size;
	unsigned long initrd_size;

	/* First, capture the embedded board information.  Then
	 * initialize the serial console port.
	 */
	embed_config(&bp);
	com_port = serial_init(0, bp);

	/* copy board data */
	if (bp)
		memcpy(hold_residual,bp,sizeof(bd_t));

	/* Set end of memory available to us.  It is always the highest
	 * memory address provided by the board information.
	 */
	end_avail = (char *)(bp->bi_memsize);

	puts("\nloaded at:     "); puthex(load_addr);
	puts(" "); puthex((unsigned long)(load_addr + (4*num_words))); puts("\n");
	if ( (unsigned long)load_addr != (unsigned long)&start ) {
		puts("relocated to:  "); puthex((unsigned long)&start);
		puts(" ");
		puthex((unsigned long)((unsigned long)&start + (4*num_words)));
		puts("\n");
	}

	if ( bp ) {
		puts("board data at: "); puthex((unsigned long)bp);
		puts(" ");
		puthex((unsigned long)((unsigned long)bp + sizeof(bd_t)));
		puts("\nrelocated to:  ");
		puthex((unsigned long)hold_residual);
		puts(" ");
		puthex((unsigned long)((unsigned long)hold_residual + sizeof(bd_t)));
		puts("\n");
	}

	/*
	 * We link ourselves to an arbitrary low address.  When we run, we
	 * relocate ourselves to that address.  __image_begin points to
	 * the part of the image where the zImage is. -- Tom
	 */
	zimage_start = (char *)(unsigned long)(&__image_begin);
	zimage_size = (unsigned long)(&__image_end) -
			(unsigned long)(&__image_begin);

	initrd_size = (unsigned long)(&__ramdisk_end) -
		(unsigned long)(&__ramdisk_begin);

	/*
	 * The zImage and initrd will be between start and _end, so they've
	 * already been moved once.  We're good to go now. -- Tom
	 */
	puts("zimage at:     "); puthex((unsigned long)zimage_start);
	puts(" "); puthex((unsigned long)(zimage_size+zimage_start));
	puts("\n");

	if ( initrd_size ) {
		puts("initrd at:     ");
		puthex((unsigned long)(&__ramdisk_begin));
		puts(" "); puthex((unsigned long)(&__ramdisk_end));puts("\n");
	}

	/*
	 * setup avail_ram - this is the first part of ram usable
	 * by the uncompress code.  Anything after this program in RAM
	 * is now fair game. -- Tom
	 */
	avail_ram = (char *)PAGE_ALIGN((unsigned long)_end);

	puts("avail ram:     "); puthex((unsigned long)avail_ram); puts(" ");
	puthex((unsigned long)end_avail); puts("\n");
	puts("\nLinux/PPC load: ");
	cp = cmd_line;
	/* This is where we try and pick the right command line for booting.
	 * If we were given one at compile time, use it.  It Is Right.
	 * If we weren't, see if we have a ramdisk.  If so, that's root.
	 * When in doubt, give them the netroot (root=/dev/nfs rw) -- Tom
	 */
#ifdef CONFIG_CMDLINE_BOOL
	memcpy (cmd_line, compiled_string, sizeof(compiled_string));
#else
	if ( initrd_size )
		memcpy (cmd_line, ramroot_string, sizeof(ramroot_string));
	else
		memcpy (cmd_line, netroot_string, sizeof(netroot_string));
#endif
	while ( *cp )
		putc(*cp++);
#ifdef INTERACTIVE_CONSOLE
	/*
	 * If they have a console, allow them to edit the command line.
	 * Otherwise, don't bother wasting the five seconds.
	 */
	while (timer++ < 5*1000) {
		if (tstc()) {
			while ((ch = getc()) != '\n' && ch != '\r') {
				if (ch == '\b' || ch == '\177') {
					if (cp != cmd_line) {
						cp--;
						puts("\b \b");
					}
				} else if (ch == '\030'		/* ^x */
					   || ch == '\025') {	/* ^u */
					while (cp != cmd_line) {
						cp--;
						puts("\b \b");
					}
				} else {
					*cp++ = ch;
					putc(ch);
				}
			}
			break;  /* Exit 'timer' loop */
		}
		udelay(1000);  /* 1 msec */
	}
#endif
	*cp = 0;
	puts("\nUncompressing Linux...");

	gunzip(0, 0x400000, zimage_start, &zimage_size);
	flush_instruction_cache();
	puts("done.\n");
	{
		struct bi_record *rec;
		unsigned long initrd_loc;
		unsigned long rec_loc = _ALIGN((unsigned long)(zimage_size) +
				(1 << 20) - 1, (1 << 20));
		rec = (struct bi_record *)rec_loc;

		/* We need to make sure that the initrd and bi_recs do not
		 * overlap. */
		if ( initrd_size ) {
			initrd_loc = (unsigned long)(&__ramdisk_begin);
			/* If the bi_recs are in the middle of the current
			 * initrd, move the initrd to the next MB
			 * boundary. */
			if ((rec_loc > initrd_loc) &&
					((initrd_loc + initrd_size)
					 > rec_loc)) {
				initrd_loc = _ALIGN((unsigned long)(zimage_size)
						+ (2 << 20) - 1, (2 << 20));
			 	memmove((void *)initrd_loc, &__ramdisk_begin,
					 initrd_size);
		         	puts("initrd moved:  "); puthex(initrd_loc);
			 	puts(" "); puthex(initrd_loc + initrd_size);
			 	puts("\n");
			}
		}

		rec->tag = BI_FIRST;
		rec->size = sizeof(struct bi_record);
		rec = (struct bi_record *)((unsigned long)rec + rec->size);

		rec->tag = BI_CMD_LINE;
		memcpy( (char *)rec->data, cmd_line, strlen(cmd_line)+1);
		rec->size = sizeof(struct bi_record) + strlen(cmd_line) + 1;
		rec = (struct bi_record *)((unsigned long)rec + rec->size);

		if ( initrd_size ) {
			rec->tag = BI_INITRD;
			rec->data[0] = initrd_loc;
			rec->data[1] = initrd_size;
			rec->size = sizeof(struct bi_record) + 2 *
				sizeof(unsigned long);
			rec = (struct bi_record *)((unsigned long)rec +
					rec->size);
		}

		rec->tag = BI_LAST;
		rec->size = sizeof(struct bi_record);
		rec = (struct bi_record *)((unsigned long)rec + rec->size);
	}
	puts("Now booting the kernel\n");
	serial_close(com_port);

	return (unsigned long)hold_residual;
}
Example no. 12
static struct ib_cq *iwch_create_cq(struct ib_device *ibdev, int entries, int vector,
			     struct ib_ucontext *ib_context,
			     struct ib_udata *udata)
{
	struct iwch_dev *rhp;
	struct iwch_cq *chp;
	struct iwch_create_cq_resp uresp;
	struct iwch_create_cq_req ureq;
	struct iwch_ucontext *ucontext = NULL;

	PDBG("%s ib_dev %p entries %d\n", __func__, ibdev, entries);
	rhp = to_iwch_dev(ibdev);
	chp = kzalloc(sizeof(*chp), GFP_KERNEL);
	if (!chp)
		return ERR_PTR(-ENOMEM);

	if (ib_context) {
		ucontext = to_iwch_ucontext(ib_context);
		if (!t3a_device(rhp)) {
			if (ib_copy_from_udata(&ureq, udata, sizeof (ureq))) {
				kfree(chp);
				return ERR_PTR(-EFAULT);
			}
			chp->user_rptr_addr = (u32 __user *)(unsigned long)ureq.user_rptr_addr;
		}
	}

	if (t3a_device(rhp)) {

		/*
		 * T3A: Add some fluff to handle extra CQEs inserted
		 * for various errors.
		 * Additional CQE possibilities:
		 *      TERMINATE,
		 *      incoming RDMA WRITE Failures
		 *      incoming RDMA READ REQUEST FAILUREs
		 * NOTE: We cannot ensure the CQ won't overflow.
		 */
		entries += 16;
	}
	entries = roundup_pow_of_two(entries);
	chp->cq.size_log2 = ilog2(entries);

	if (cxio_create_cq(&rhp->rdev, &chp->cq)) {
		kfree(chp);
		return ERR_PTR(-ENOMEM);
	}
	chp->rhp = rhp;
	chp->ibcq.cqe = 1 << chp->cq.size_log2;
	spin_lock_init(&chp->lock);
	atomic_set(&chp->refcnt, 1);
	init_waitqueue_head(&chp->wait);
	insert_handle(rhp, &rhp->cqidr, chp, chp->cq.cqid);

	if (ucontext) {
		struct iwch_mm_entry *mm;

		mm = kmalloc(sizeof *mm, GFP_KERNEL);
		if (!mm) {
			iwch_destroy_cq(&chp->ibcq);
			return ERR_PTR(-ENOMEM);
		}
		uresp.cqid = chp->cq.cqid;
		uresp.size_log2 = chp->cq.size_log2;
		spin_lock(&ucontext->mmap_lock);
		uresp.key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		spin_unlock(&ucontext->mmap_lock);
		if (ib_copy_to_udata(udata, &uresp, sizeof (uresp))) {
			kfree(mm);
			iwch_destroy_cq(&chp->ibcq);
			return ERR_PTR(-EFAULT);
		}
		mm->key = uresp.key;
		mm->addr = virt_to_phys(chp->cq.queue);
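		/* The length handed to user space must cover whole pages for
		 * the later mmap of the CQ queue.
		 */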
		mm->len = PAGE_ALIGN((1UL << uresp.size_log2) *
					     sizeof (struct t3_cqe));
		insert_mmap(ucontext, mm);
	}
	PDBG("created cqid 0x%0x chp %p size 0x%0x, dma_addr 0x%0llx\n",
	     chp->cq.cqid, chp, (1 << chp->cq.size_log2),
	     (unsigned long long) chp->cq.dma_addr);
	return &chp->ibcq;
}
Example no. 13
/*
 * paging_init() continues the virtual memory environment setup which
 * was begun by the code in arch/head.S.
 */
void __init paging_init(void)
{
	unsigned long zones_size[MAX_NR_ZONES] = { 0, };
	unsigned long min_addr, max_addr;
	unsigned long addr, size, end;
	int i;

#ifdef DEBUG
	printk ("start of paging_init (%p, %lx)\n", kernel_pg_dir, availmem);
#endif

	/* Fix the cache mode in the page descriptors for the 680[46]0.  */
	if (CPU_IS_040_OR_060) {
		int i;
#ifndef mm_cachebits
		mm_cachebits = _PAGE_CACHE040;
#endif
		for (i = 0; i < 16; i++)
			pgprot_val(protection_map[i]) |= _PAGE_CACHE040;
	}

	min_addr = m68k_memory[0].addr;
	max_addr = min_addr + m68k_memory[0].size;
	for (i = 1; i < m68k_num_memory;) {
		if (m68k_memory[i].addr < min_addr) {
			printk("Ignoring memory chunk at 0x%lx:0x%lx before the first chunk\n",
				m68k_memory[i].addr, m68k_memory[i].size);
			printk("Fix your bootloader or use a memfile to make use of this area!\n");
			m68k_num_memory--;
			memmove(m68k_memory + i, m68k_memory + i + 1,
				(m68k_num_memory - i) * sizeof(struct mem_info));
			continue;
		}
		addr = m68k_memory[i].addr + m68k_memory[i].size;
		if (addr > max_addr)
			max_addr = addr;
		i++;
	}
	m68k_memoffset = min_addr - PAGE_OFFSET;
	m68k_virt_to_node_shift = fls(max_addr - min_addr - 1) - 6;

	module_fixup(NULL, __start_fixup, __stop_fixup);
	flush_icache();

	high_memory = phys_to_virt(max_addr);

	min_low_pfn = availmem >> PAGE_SHIFT;
	max_low_pfn = max_addr >> PAGE_SHIFT;

	for (i = 0; i < m68k_num_memory; i++) {
		addr = m68k_memory[i].addr;
		end = addr + m68k_memory[i].size;
		m68k_setup_node(i);
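		/* Round the free-memory start up to a page so the bootmem
		 * bitmap begins on a page boundary.
		 */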
		availmem = PAGE_ALIGN(availmem);
		availmem += init_bootmem_node(NODE_DATA(i),
					      availmem >> PAGE_SHIFT,
					      addr >> PAGE_SHIFT,
					      end >> PAGE_SHIFT);
	}

	/*
	 * Map the physical memory available into the kernel virtual
	 * address space. First initialize the bootmem allocator with
	 * the memory we already mapped, so map_node() has something
	 * to allocate.
	 */
	addr = m68k_memory[0].addr;
	size = m68k_memory[0].size;
	free_bootmem_node(NODE_DATA(0), availmem, min(INIT_MAPPED_SIZE, size) - (availmem - addr));
	map_node(0);
	if (size > INIT_MAPPED_SIZE)
		free_bootmem_node(NODE_DATA(0), addr + INIT_MAPPED_SIZE, size - INIT_MAPPED_SIZE);

	for (i = 1; i < m68k_num_memory; i++)
		map_node(i);

	flush_tlb_all();

	/*
	 * initialize the bad page table and bad page to point
	 * to a couple of allocated pages
	 */
	empty_zero_page = alloc_bootmem_pages(PAGE_SIZE);
	memset(empty_zero_page, 0, PAGE_SIZE);

	/*
	 * Set up SFC/DFC registers
	 */
	set_fs(KERNEL_DS);

#ifdef DEBUG
	printk ("before free_area_init\n");
#endif
	for (i = 0; i < m68k_num_memory; i++) {
		zones_size[ZONE_DMA] = m68k_memory[i].size >> PAGE_SHIFT;
		free_area_init_node(i, pg_data_map + i, zones_size,
				    m68k_memory[i].addr >> PAGE_SHIFT, NULL);
	}
}
Example no. 14
unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long pax_task_size = TASK_SIZE;
	struct vm_unmapped_area_info info;
	unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);

#ifdef CONFIG_PAX_SEGMEXEC
	if (mm->pax_flags & MF_PAX_SEGMEXEC)
		pax_task_size = SEGMEXEC_TASK_SIZE;
#endif

	pax_task_size -= PAGE_SIZE;

	if (len > pax_task_size)
		return -ENOMEM;

	if (flags & MAP_FIXED)
		return addr;

#ifdef CONFIG_PAX_RANDMMAP
	if (!(mm->pax_flags & MF_PAX_RANDMMAP))
#endif

	if (addr) {
		addr = PAGE_ALIGN(addr);
		if (pax_task_size - len >= addr) {
			vma = find_vma(mm, addr);
			if (check_heap_stack_gap(vma, addr, len, offset))
				return addr;
		}
	}

	info.flags = 0;
	info.length = len;
	info.align_mask = filp ? get_align_mask() : 0;
	info.align_offset = pgoff << PAGE_SHIFT;
	info.threadstack_offset = offset;

#ifdef CONFIG_PAX_PAGEEXEC
	if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE)) {
		info.low_limit = 0x00110000UL;
		info.high_limit = mm->start_code;

#ifdef CONFIG_PAX_RANDMMAP
		if (mm->pax_flags & MF_PAX_RANDMMAP)
			info.low_limit += mm->delta_mmap & 0x03FFF000UL;
#endif

		if (info.low_limit < info.high_limit) {
			addr = vm_unmapped_area(&info);
			if (!IS_ERR_VALUE(addr))
				return addr;
		}
	} else
#endif

	info.low_limit = mm->mmap_base;
	info.high_limit = pax_task_size;

	return vm_unmapped_area(&info);
}
Example no. 15
static int busname_peek_message(BusName *n) {
        struct kdbus_cmd_recv cmd_recv = {
                .size = sizeof(cmd_recv),
                .flags = KDBUS_RECV_PEEK,
        };
        struct kdbus_cmd_free cmd_free = {
                .size = sizeof(cmd_free),
        };
        const char *comm = NULL;
        struct kdbus_item *d;
        struct kdbus_msg *k;
        size_t start, ps, sz, delta;
        void *p = NULL;
        pid_t pid = 0;
        int r;

        /* Generate a friendly debug log message about which process
         * caused triggering of this bus name. This simply peeks the
         * metadata of the first queued message and logs it. */

        assert(n);

        /* Let's shortcut things a bit, if debug logging is turned off
         * anyway. */

        if (log_get_max_level() < LOG_DEBUG)
                return 0;

        r = ioctl(n->starter_fd, KDBUS_CMD_RECV, &cmd_recv);
        if (r < 0) {
                if (errno == EINTR || errno == EAGAIN)
                        return 0;

                return log_unit_error_errno(UNIT(n), errno, "Failed to query activation message: %m");
        }

        /* We map as late as possible, and unmap immediately after
         * use. On 32bit, address space is scarce and we want to be
         * able to handle a lot of activator connections at the same
         * time, and hence shouldn't keep the mmap()s around for
         * longer than necessary. */

        ps = page_size();
        start = (cmd_recv.msg.offset / ps) * ps;
        delta = cmd_recv.msg.offset - start;
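        /* Map whole pages: the message's offset within its first page plus its size, rounded up */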
        sz = PAGE_ALIGN(delta + cmd_recv.msg.msg_size);

        p = mmap(NULL, sz, PROT_READ, MAP_SHARED, n->starter_fd, start);
        if (p == MAP_FAILED) {
                r = log_unit_error_errno(UNIT(n), errno, "Failed to map activation message: %m");
                goto finish;
        }

        k = (struct kdbus_msg *) ((uint8_t *) p + delta);
        KDBUS_ITEM_FOREACH(d, k, items) {
                switch (d->type) {

                case KDBUS_ITEM_PIDS:
                        pid = d->pids.pid;
                        break;

                case KDBUS_ITEM_PID_COMM:
                        comm = d->str;
                        break;
                }
        }

        if (pid > 0)
                log_unit_debug(UNIT(n), "Activation triggered by process " PID_FMT " (%s)", pid, strna(comm));

        r = 0;

finish:
        if (p)
                (void) munmap(p, sz);

        cmd_free.offset = cmd_recv.msg.offset;
        if (ioctl(n->starter_fd, KDBUS_CMD_FREE, &cmd_free) < 0)
                log_unit_warning(UNIT(n), "Failed to free peeked message, ignoring: %m");

        return r;
}

static int busname_dispatch_io(sd_event_source *source, int fd, uint32_t revents, void *userdata) {
        BusName *n = userdata;

        assert(n);
        assert(fd >= 0);

        if (n->state != BUSNAME_LISTENING)
                return 0;

        log_unit_debug(UNIT(n), "Activation request");

        if (revents != EPOLLIN) {
                log_unit_error(UNIT(n), "Got unexpected poll event (0x%x) on starter fd.", revents);
                goto fail;
        }

        busname_peek_message(n);
        busname_enter_running(n);
        return 0;
fail:

        busname_enter_dead(n, BUSNAME_FAILURE_RESOURCES);
        return 0;
}

static void busname_sigchld_event(Unit *u, pid_t pid, int code, int status) {
        BusName *n = BUSNAME(u);
        BusNameResult f;

        assert(n);
        assert(pid >= 0);

        if (pid != n->control_pid)
                return;

        n->control_pid = 0;

        if (is_clean_exit(code, status, NULL))
                f = BUSNAME_SUCCESS;
        else if (code == CLD_EXITED)
                f = BUSNAME_FAILURE_EXIT_CODE;
        else if (code == CLD_KILLED)
                f = BUSNAME_FAILURE_SIGNAL;
        else if (code == CLD_DUMPED)
                f = BUSNAME_FAILURE_CORE_DUMP;
        else
                assert_not_reached("Unknown sigchld code");

        log_unit_full(u, f == BUSNAME_SUCCESS ? LOG_DEBUG : LOG_NOTICE, 0,
                      "Control process exited, code=%s status=%i", sigchld_code_to_string(code), status);

        if (f != BUSNAME_SUCCESS)
                n->result = f;

        switch (n->state) {

        case BUSNAME_MAKING:
                if (f == BUSNAME_SUCCESS)
                        busname_enter_listening(n);
                else
                        busname_enter_signal(n, BUSNAME_SIGTERM, f);
                break;

        case BUSNAME_SIGTERM:
        case BUSNAME_SIGKILL:
                busname_enter_dead(n, f);
                break;

        default:
                assert_not_reached("Uh, control process died at wrong time.");
        }

        /* Notify clients about changed exit status */
        unit_add_to_dbus_queue(u);
}
Example no. 16
/**
 * @brief   Chunkmem device ioctl function
 */
static long chunkmem_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	chunk_block_t block;
	void *ka;           /* kernel_addr */
	unsigned int va;    /* user_addr */
	unsigned int pa;    /* phy_addr*/
	long ret = 0;
	unsigned int offset = 0;

	switch (cmd) {
	case CHUNK_MEM_ALLOC:
	case CHUNK_MEM_SHARE:
	case CHUNK_MEM_MMAP:
		{
			if (copy_from_user(&block, (void __user*)arg, sizeof(block))) {
				ret = -EFAULT;
				break;
			}

			/* alloc|share|mmap memory */
			if (cmd == CHUNK_MEM_MMAP) {
				DIAG_VERB("CHUNK_MEM_MMAP:\n");
				ka = gp_chunk_va(block.phy_addr);
				if (ka == NULL) {
					DIAG_ERROR("CHUNK_MEM_MMAP: bad address! (%s:%08X)\n", current->comm, block.phy_addr);
					ret = -EFAULT; /* mmap fail */
					break;
				}
				/* page alignment */
				offset = block.phy_addr & ~PAGE_MASK;
				ka = (void *)((unsigned long)ka & PAGE_MASK);
				DIAG_VERB("CHUNK_MEM_MMAP: phy_addr                  = %08X\n", block.phy_addr);
				DIAG_VERB("CHUNK_MEM_MMAP: size                      = %08X\n", block.size);
				DIAG_VERB("CHUNK_MEM_MMAP: ka                        = %08X\n", (unsigned int)ka);
				DIAG_VERB("CHUNK_MEM_MMAP: offset                    = %08X\n", offset);
				DIAG_VERB("CHUNK_MEM_MMAP: PAGE_ALIGN(size + offset) = %08X\n", PAGE_ALIGN(block.size + offset));
			}
			else {
				if (cmd == CHUNK_MEM_ALLOC) {
					DIAG_VERB("CHUNK_MEM_ALLOC:\n");
					DIAG_VERB("size = %08X (%d)\n", block.size, block.size);
					ka = gp_chunk_malloc(current->tgid, block.size);
					DIAG_VERB("gp_chunk_malloc return ka=%08X\n", ka);
					if (ka == NULL) {
						DIAG_ERROR("CHUNK_MEM_ALLOC: out of memory! (%s:%08X)\n", current->comm, block.size);
						dlMalloc_Status(NULL);
						ret = -ENOMEM;
						break;
					}
					block.phy_addr = gp_chunk_pa(ka);
				}
				else { /* CHUNK_MEM_SHARE */
					DIAG_VERB("CHUNK_MEM_SHARE:\n");
					ka = gp_chunk_va(block.phy_addr);
					if ((ka == NULL) || (dlShare(ka) == 0)) {
						DIAG_ERROR("CHUNK_MEM_SHARE: bad address! (%s:%08X)\n", current->comm, block.phy_addr);
						ret = -EFAULT; /* share fail */
						break;
					}
				}
				block.size = dlMalloc_Usable_Size(ka) & PAGE_MASK; /* actual allocated size */
				DIAG_VERB("actual size = %08X (%d)\n", block.size, block.size);
				DIAG_VERB("ka = %08X\n", (unsigned int)ka);
			}

			/* mmap to userspace */
			down(&chunkmem->sem);
			down_write(&current->mm->mmap_sem);
			chunkmem->mmap_enable = 1; /* enable mmap in CHUNK_MEM_ALLOC */
			va = do_mmap_pgoff(
				file, 0, PAGE_ALIGN(block.size + offset),
				PROT_READ|PROT_WRITE,
				MAP_SHARED,
				(ka - chunkmem->vbase) >> PAGE_SHIFT);
			chunkmem->mmap_enable = 0; /* disable it */
			up_write(&current->mm->mmap_sem);
			up(&chunkmem->sem);
			if (IS_ERR_VALUE(va)) {
				ret = va; /* errcode */
				DIAG_ERROR("%s: chunkmem mmap fail(%d)! (%s)\n",
						   (cmd == CHUNK_MEM_MMAP) ? "CHUNK_MEM_MMAP" : ((cmd == CHUNK_MEM_ALLOC) ? "CHUNK_MEM_ALLOC" : "CHUNK_MEM_SHARE"),
						   ret, current->comm);
				break;
			}
			va += offset;
			block.addr = (void *)va;
			DIAG_VERB("va = %08X\n\n", va);

			if (copy_to_user((void __user*)arg, &block, sizeof(block))) {
				ret = -EFAULT;
				break;
			}
		}
		break;

	case CHUNK_MEM_FREE:
		{
			if (copy_from_user(&block, (void __user*)arg, sizeof(block))) {
				ret = -EFAULT;
				break;
			}

			/* translate user_va to ka */
			DIAG_VERB("CHUNK_MEM_FREE:\n");
			DIAG_VERB("va = %08X\n", (unsigned int)block.addr);
			pa = gp_user_va_to_pa(block.addr);    /* user_addr to phy_addr */
			if (pa == 0) {
				DIAG_ERROR("CHUNK_MEM_FREE: chunkmem user_va_to_pa fail! (%s:%08X)\n", current->comm, block.addr);
				ret = -EFAULT;
				break;
			}
			DIAG_VERB("pa = %08X\n", pa);
			ka = gp_chunk_va(pa);                  /* phy_addr to kernel_addr */
			if (ka == NULL) {
				DIAG_ERROR("CHUNK_MEM_FREE: not a chunkmem address! (%s:%08X)\n", current->comm, pa);
				ret = -EFAULT;
				break;
			}
			block.size = dlMalloc_Usable_Size(ka) & PAGE_MASK;
			DIAG_VERB("ka = %08X\n", (unsigned int)ka);
			DIAG_VERB("actual size = %08X (%d)\n\n", block.size, block.size);

			/* munmap memory */
			down_write(&current->mm->mmap_sem);
			do_munmap(current->mm, (unsigned int)block.addr, block.size);
			up_write(&current->mm->mmap_sem);

			/* free memory */
			gp_chunk_free(ka);
#if (DIAG_LEVEL >= DIAG_LVL_VERB) && !defined(DIAG_VERB_OFF)
			dlMalloc_Status(NULL);
#endif
		}
		break;

	case CHUNK_MEM_INFO:
		{
			chunk_info_t info;

			if (copy_from_user(&info, (void __user*)arg, sizeof(info))) {
				ret = -EFAULT;
				break;
			}

			if (info.pid == (unsigned int)(-1)) {
				info.pid = current->tgid;
			}

#if CHUNK_SUSPEND_TEST
			if (info.pid) {
				dlMalloc_Status(NULL);
			}
			else {
				gp_chunk_suspend(my_save_data);
				memset(chunkmem->vbase, 0, chunkmem->size);
				/* restore */
				while (blocks != NULL) {
					data_block_t *block = blocks;
					blocks = block->next;
					DIAG_DEBUG("restore data: %p %08X\n", block->addr, block->size);
					memcpy(block->addr, &block->data, block->size);
					kfree(block);
				}
			}
#else
			down(&chunkmem->sem);
			dlMalloc_Status((mem_info_t *)&info);
			up(&chunkmem->sem);
#endif
			if (copy_to_user((void __user*)arg, &info, sizeof(info))) {
				ret = -EFAULT;
				break;
			}
		}
		break;

	case CHUNK_MEM_VA2PA:
		{
			ret = -EFAULT;
			if (copy_from_user(&block, (void __user*)arg, sizeof(block))) {
				break;
			}

			pa = gp_user_va_to_pa(block.addr);    /* user_addr to phy_addr */
			if (pa != 0) {
				ka = gp_chunk_va(pa);             /* phy_addr to kernel_addr */
				if (ka != NULL) {
					block.phy_addr = pa;
					if (copy_to_user((void __user*)arg, &block, sizeof(block)) == 0) {
						ret = 0;
					}
				}
			}
		}
		break;

	case CHUNK_MEM_MUNMAP:
		{
			if (copy_from_user(&block, (void __user*)arg, sizeof(block))) {
				ret = -EFAULT;
				break;
			}

			va = (unsigned int)block.addr;
			/* page alignment */
			offset = va & ~PAGE_MASK;
			va &= PAGE_MASK;

			/* munmap memory */
			down_write(&current->mm->mmap_sem);
			do_munmap(current->mm, va, PAGE_ALIGN(block.size + offset));
			up_write(&current->mm->mmap_sem);
		}
		break;
	
	case CHUNK_MEM_FREEALL:
		gp_chunk_free_all((unsigned int)arg);
		printk(KERN_WARNING "CHUNK_MEM_FREEALL(%ld)\n", arg);
		break;
	
	case CHUNK_MEM_DUMP:
		dlMalloc_Status(0);
		break;
	
	default:
		ret = -ENOTTY; /* Inappropriate ioctl for device */
		break;
	}

	return ret;
}
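A minimal standalone sketch (not part of the driver) of the page-offset bookkeeping that the CHUNK_MEM_MMAP and CHUNK_MEM_MUNMAP cases perform: split an arbitrary physical address into a page-aligned base plus an in-page offset, map PAGE_ALIGN(size + offset) bytes, and add the offset back to the returned address. PAGE_SIZE, PAGE_MASK and PAGE_ALIGN are redefined here for illustration and assume 4 KiB pages.

#include <stdio.h>

#define PAGE_SIZE      4096UL                    /* assumption: 4 KiB pages */
#define PAGE_MASK      (~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x)  (((x) + PAGE_SIZE - 1) & PAGE_MASK)

int main(void)
{
	unsigned long phy_addr = 0x12345678UL;   /* example physical address */
	unsigned long size     = 0x2000UL;       /* example request: 8 KiB   */

	unsigned long offset  = phy_addr & ~PAGE_MASK;     /* in-page offset  */
	unsigned long base    = phy_addr & PAGE_MASK;      /* aligned base    */
	unsigned long map_len = PAGE_ALIGN(size + offset); /* bytes to map    */

	/* prints: base=0x12345000 offset=0x678 map_len=0x3000 */
	printf("base=%#lx offset=%#lx map_len=%#lx\n", base, offset, map_len);
	return 0;
}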
Esempio n. 17
0
Manager *manager_new(void) {
        Manager *m;
        int r;

        m = new0(Manager, 1);
        if (!m)
                return NULL;

        m->console_active_fd = -1;
        m->reserve_vt_fd = -1;

        m->n_autovts = 6;
        m->reserve_vt = 6;
        m->remove_ipc = true;
        m->inhibit_delay_max = 5 * USEC_PER_SEC;
        m->handle_power_key = HANDLE_POWEROFF;
        m->handle_suspend_key = HANDLE_SUSPEND;
        m->handle_hibernate_key = HANDLE_HIBERNATE;
        m->handle_lid_switch = HANDLE_SUSPEND;
        m->handle_lid_switch_docked = HANDLE_IGNORE;
        m->lid_switch_ignore_inhibited = true;
        m->holdoff_timeout_usec = 30 * USEC_PER_SEC;

        m->idle_action_usec = 30 * USEC_PER_MINUTE;
        m->idle_action = HANDLE_IGNORE;
        m->idle_action_not_before_usec = now(CLOCK_MONOTONIC);

        m->runtime_dir_size = PAGE_ALIGN((size_t) (physical_memory() / 10)); /* 10% */

        m->devices = hashmap_new(&string_hash_ops);
        m->seats = hashmap_new(&string_hash_ops);
        m->sessions = hashmap_new(&string_hash_ops);
        m->users = hashmap_new(NULL);
        m->inhibitors = hashmap_new(&string_hash_ops);
        m->buttons = hashmap_new(&string_hash_ops);

        m->user_units = hashmap_new(&string_hash_ops);
        m->session_units = hashmap_new(&string_hash_ops);

        m->busnames = set_new(&string_hash_ops);

        if (!m->devices || !m->seats || !m->sessions || !m->users || !m->inhibitors || !m->buttons || !m->busnames ||
            !m->user_units || !m->session_units)
                goto fail;

        m->kill_exclude_users = strv_new("root", NULL);
        if (!m->kill_exclude_users)
                goto fail;

        m->udev = udev_new();
        if (!m->udev)
                goto fail;

        r = sd_event_default(&m->event);
        if (r < 0)
                goto fail;

        sd_event_set_watchdog(m->event, true);

        return m;

fail:
        manager_free(m);
        return NULL;
}
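As a worked example of the runtime_dir_size line above (assuming 4 KiB pages): on a hypothetical machine with 4 GiB of RAM, physical_memory() / 10 is 429,496,729 bytes, which PAGE_ALIGN rounds up to the next page multiple, 429,498,368 bytes (about 410 MiB), used as the runtime directory size limit.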
Esempio n. 18
0
struct bi_record *
decompress_kernel(unsigned long load_addr, int num_words, unsigned long cksum)
{
#ifdef INTERACTIVE_CONSOLE
	int timer = 0;
	char ch;
#endif
	char *cp;
	struct bi_record *rec;
	unsigned long initrd_loc = 0, TotalMemory = 0;

#if defined(CONFIG_SERIAL_8250_CONSOLE) || defined(CONFIG_SERIAL_MPSC_CONSOLE)
	com_port = serial_init(0, NULL);
#endif

#if defined(CONFIG_44x) && defined(PPC44x_EMAC0_MR0)
	/* Reset MAL */
	mtdcr(DCRN_MALCR(DCRN_MAL_BASE), MALCR_MMSR);
	/* Wait for reset */
	while (mfdcr(DCRN_MALCR(DCRN_MAL_BASE)) & MALCR_MMSR) {};
	/* Reset EMAC */
	*(volatile unsigned long *)PPC44x_EMAC0_MR0 = 0x20000000;
	__asm__ __volatile__("eieio");
#endif

	/*
	 * Call get_mem_size(), which is memory controller dependent,
	 * and we must have the correct file linked in here.
	 */
	TotalMemory = get_mem_size();

	/* assume the chunk below 8M is free */
	end_avail = (char *)0x00800000;

	/*
	 * Reveal where we were loaded at and where we
	 * were relocated to.
	 */
	puts("loaded at:     "); puthex(load_addr);
	puts(" "); puthex((unsigned long)(load_addr + (4*num_words)));
	puts("\n");
	if ( (unsigned long)load_addr != (unsigned long)&start )
	{
		puts("relocated to:  "); puthex((unsigned long)&start);
		puts(" ");
		puthex((unsigned long)((unsigned long)&start + (4*num_words)));
		puts("\n");
	}

	/*
	 * We link ourself to 0x00800000.  When we run, we relocate
	 * ourselves there.  So we just need __image_begin for the
	 * start. -- Tom
	 */
	zimage_start = (char *)(unsigned long)(&__image_begin);
	zimage_size = (unsigned long)(&__image_end) -
			(unsigned long)(&__image_begin);

	initrd_size = (unsigned long)(&__ramdisk_end) -
		(unsigned long)(&__ramdisk_begin);

	/*
	 * The zImage and initrd will be between start and _end, so they've
	 * already been moved once.  We're good to go now. -- Tom
	 */
	avail_ram = (char *)PAGE_ALIGN((unsigned long)_end);
	puts("zimage at:     "); puthex((unsigned long)zimage_start);
	puts(" "); puthex((unsigned long)(zimage_size+zimage_start));
	puts("\n");

	if ( initrd_size ) {
		puts("initrd at:     ");
		puthex((unsigned long)(&__ramdisk_begin));
		puts(" "); puthex((unsigned long)(&__ramdisk_end));puts("\n");
	}

	avail_ram = (char *)0x00400000;
	end_avail = (char *)0x00800000;
	puts("avail ram:     "); puthex((unsigned long)avail_ram); puts(" ");
	puthex((unsigned long)end_avail); puts("\n");

	if (keyb_present)
		CRT_tstc();  /* Forces keyboard to be initialized */
#ifdef CONFIG_GEMINI
	/*
	 * If cmd_line is empty and cmd_preset is not, copy cmd_preset
	 * to cmd_line.  This way we can override cmd_preset with the
	 * command line from Smon.
	 */

	if ( (cmd_line[0] == '\0') && (cmd_preset[0] != '\0'))
		memcpy (cmd_line, cmd_preset, sizeof(cmd_preset));
#endif

	/* Display standard Linux/PPC boot prompt for kernel args */
	puts("\nLinux/PPC load: ");
	cp = cmd_line;
	memcpy (cmd_line, cmd_preset, sizeof(cmd_preset));
	while ( *cp ) putc(*cp++);

#ifdef INTERACTIVE_CONSOLE
	/*
	 * If they have a console, allow them to edit the command line.
	 * Otherwise, don't bother wasting the five seconds.
	 */
	while (timer++ < 5*1000) {
		if (tstc()) {
			while ((ch = getc()) != '\n' && ch != '\r') {
				/* Test for backspace/delete */
				if (ch == '\b' || ch == '\177') {
					if (cp != cmd_line) {
						cp--;
						puts("\b \b");
					}
				/* Test for ^x/^u (and wipe the line) */
				} else if (ch == '\030' || ch == '\025') {
					while (cp != cmd_line) {
						cp--;
						puts("\b \b");
					}
				} else {
					*cp++ = ch;
					putc(ch);
				}
			}
			break;  /* Exit 'timer' loop */
		}
		udelay(1000);  /* 1 msec */
	}
	*cp = 0;
#endif
	puts("\n");

	puts("Uncompressing Linux...");
	gunzip(0x0, 0x400000, zimage_start, &zimage_size);
	puts("done.\n");

	/* get the bi_rec address */
	rec = bootinfo_addr(zimage_size);

	/* We need to make sure that the initrd and bi_recs do not
	 * overlap. */
	if ( initrd_size ) {
		unsigned long rec_loc = (unsigned long) rec;
		initrd_loc = (unsigned long)(&__ramdisk_begin);
		/* If the bi_recs are in the middle of the current
		 * initrd, move the initrd to the next MB
		 * boundary. */
		if ((rec_loc > initrd_loc) &&
				((initrd_loc + initrd_size) > rec_loc)) {
			initrd_loc = _ALIGN((unsigned long)(zimage_size)
					+ (2 << 20) - 1, (2 << 20));
			memmove((void *)initrd_loc, &__ramdisk_begin,
				initrd_size);
			puts("initrd moved:  "); puthex(initrd_loc);
			puts(" "); puthex(initrd_loc + initrd_size);
			puts("\n");
		}
	}

	bootinfo_init(rec);
	if ( TotalMemory )
		bootinfo_append(BI_MEMSIZE, sizeof(int), (void*)&TotalMemory);

	bootinfo_append(BI_CMD_LINE, strlen(cmd_line)+1, (void*)cmd_line);

	/* add a bi_rec for the initrd if it exists */
	if (initrd_size) {
		unsigned long initrd[2];

		initrd[0] = initrd_loc;
		initrd[1] = initrd_size;

		bootinfo_append(BI_INITRD, sizeof(initrd), &initrd);
	}
	puts("Now booting the kernel\n");
	serial_close(com_port);

	return rec;
}
Esempio n. 19
0
unsigned long
decompress_kernel(unsigned long load_addr, int num_words, unsigned long cksum, RESIDUAL *residual)
{
	int timer;
	extern unsigned long start;
	char *cp, ch;
	unsigned long i, motorola_id = 0;
	char needs_reloc = 0;
	BATU *u;
	BATL *l;
	char	*dp;

	lines = 25;
	cols = 80;
	orig_x = 0;
	orig_y = 24;

	/* Grab some space for the command line and board info.  Since
	 * we no longer use the ELF header, but it was loaded, grab
	 * that space.
	 */
	cmd_line = (char *)(load_addr - 0x10000);
	hold_residual = (RESIDUAL *)(cmd_line + sizeof(cmd_buf));
	/* copy board data */
	if (residual)
		memcpy(hold_residual,residual,sizeof(bd_t));

	/* MBX/prep sometimes put the residual/board info at the end of mem 
	 * assume 16M for now  -- Cort
	 * To boot on standard MBX boards with 4M, we can't use initrd,
	 * and we have to assume less memory.  -- Dan
	 */
	if ( INITRD_OFFSET )
		end_avail = (char *)0x01000000;
	else
		end_avail = (char *)0x00400000;

	/* let residual data tell us it's higher */
	if ( (unsigned long)residual > 0x00800000 )
		end_avail = (char *)PAGE_ALIGN((unsigned long)residual);

	puts("loaded at:     "); puthex(load_addr);
	puts(" "); puthex((unsigned long)(load_addr + (4*num_words))); puts("\n");
	if ( (unsigned long)load_addr != (unsigned long)&start )
	{
		puts("relocated to:  "); puthex((unsigned long)&start);
		puts(" ");
		puthex((unsigned long)((unsigned long)&start + (4*num_words)));
		puts("\n");
	}

	if ( residual )
	{
		puts("board data at: "); puthex((unsigned long)residual);
		puts(" ");
		puthex((unsigned long)((unsigned long)residual + sizeof(bd_t)));
		puts("\n");
		puts("relocated to:  ");
		puthex((unsigned long)hold_residual);
		puts(" ");
		puthex((unsigned long)((unsigned long)hold_residual + sizeof(bd_t)));
		puts("\n");
	}

	/* we have to subtract 0x10000 here to correct for objdump including the
	   size of the elf header which we strip -- Cort */
	zimage_start = (char *)(load_addr - 0x10000 + ZIMAGE_OFFSET);
	zimage_size = ZIMAGE_SIZE;

	if ( INITRD_OFFSET )
		initrd_start = load_addr - 0x10000 + INITRD_OFFSET;
	else
		initrd_start = 0;
	initrd_end = INITRD_SIZE + initrd_start;

	/*
	 * setup avail_ram - this is the first part of ram usable
	 * by the uncompress code. -- Cort
	 */
	avail_ram = (char *)PAGE_ALIGN((unsigned long)zimage_start+zimage_size);
	if ( ((load_addr+(num_words*4)) > (unsigned long) avail_ram)
		&& (load_addr <= 0x01000000) )
		avail_ram = (char *)(load_addr+(num_words*4));
	if ( (((unsigned long)&start+(num_words*4)) > (unsigned long) avail_ram)
		&& (load_addr <= 0x01000000) )
		avail_ram = (char *)((unsigned long)&start+(num_words*4));
	
	/* relocate zimage */
	puts("zimage at:     "); puthex((unsigned long)zimage_start);
	puts(" "); puthex((unsigned long)(zimage_size+zimage_start)); puts("\n");
	/*
	 * don't relocate the zimage if it was loaded above 16M since
	 * things get weird if we try to relocate -- Cort
	 * We don't relocate zimage on a base MBX board because of
	 * insufficient memory.  In this case we don't have initrd either,
	 * so use that as an indicator.  -- Dan
	 */
	
	/* Determine if we have a Motorola board */
	needs_reloc = 0;
	if ( (( (unsigned long)zimage_start <= 0x01000000 ) && initrd_start)
		|| needs_reloc)
	{
		memcpy ((void *)PAGE_ALIGN(-PAGE_SIZE+(unsigned long)end_avail-zimage_size),
			(void *)zimage_start, zimage_size );	
		zimage_start = (char *)PAGE_ALIGN(-PAGE_SIZE+(unsigned long)end_avail-zimage_size);
		end_avail = (char *)zimage_start;
		puts("relocated to:  "); puthex((unsigned long)zimage_start);
		puts(" ");
		puthex((unsigned long)zimage_size+(unsigned long)zimage_start);
		puts("\n");
	}

	/* relocate initrd */
	if ( initrd_start )
	{
		puts("initrd at:     "); puthex(initrd_start);
		puts(" "); puthex(initrd_end); puts("\n");
		/*
		 * Memory is really tight on the MBX (we can assume 4M)
		 * so put the initrd at the TOP of ram, and set end_avail
		 * to right after that.
		 *
		 * I should do something like this for prep, too and keep
		 * a variable end_of_DRAM to keep track of what we think the
		 * max ram is.
		 * -- Cort
		 */
		if (needs_reloc)
		{
			memcpy ((void *)PAGE_ALIGN(-PAGE_SIZE+
				(unsigned long)end_avail-INITRD_SIZE),
				(void *)initrd_start,
				INITRD_SIZE );
			initrd_start = PAGE_ALIGN(-PAGE_SIZE+
				(unsigned long)end_avail-INITRD_SIZE);
			initrd_end = initrd_start + INITRD_SIZE;
			end_avail = (char *)initrd_start;
			puts("relocated to:  "); puthex(initrd_start);
			puts(" "); puthex(initrd_end); puts("\n");
		}
	}

	puts("avail ram:     "); puthex((unsigned long)avail_ram); puts(" ");
	puthex((unsigned long)end_avail); puts("\n");

	puts("\nLinux/PPC load: ");
	timer = 0;
	cp = cmd_line;
	memcpy (cmd_line, cmd_preset, sizeof(cmd_preset));
	while ( *cp ) putc(*cp++);
	while (timer++ < 5*1000) {
		if (tstc()) {
			while ((ch = getc()) != '\n' && ch != '\r') {
				if (ch == '\b') {
					if (cp != cmd_line) {
						cp--;
						puts("\b \b");
					}
				} else if (ch == '?') {
					if (!do_ipaddrs(&cp, 1)) {
						*cp++ = ch;
						putc(ch);
					}
				} else {
					*cp++ = ch;
					putc(ch);
				}
			}
			break;  /* Exit 'timer' loop */
		}
		udelay(1000);  /* 1 msec */
	}
	*cp = 0;
	/* The MBX does not currently have any default boot strategy.
	 * If the command line is not filled in, we will automatically
	 * create the default network boot.
	 */
	if (cmd_line[0] == 0) {
		dp = root_string;
		while (*dp != 0)
			*cp++ = *dp++;
		*cp++ = ' ';

		dp = nfsaddrs_string;
		while (*dp != 0)
			*cp++ = *dp++;
		dp = cp;
		do_ipaddrs(&cp, 0);
		*cp++ = ' ';

		/* Add the server address to the root file system path.
		*/
		dp = strrchr(dp, ':');
		dp++;
		do_nfsroot(&cp, dp);
		*cp = 0;
	}
	puts("\n");

	/* mappings on early boot can only handle 16M */
	if ( (unsigned long)cmd_line > (16<<20))
		puts("cmd_line located > 16M\n");
	if ( (unsigned long)hold_residual > (16<<20))
		puts("hold_residual located > 16M\n");
	if ( initrd_start > (16<<20))
		puts("initrd_start located > 16M\n");
       
	puts("Uncompressing Linux...");

	gunzip(0, 0x400000, zimage_start, &zimage_size);
	puts("done.\n");
	puts("Now booting the kernel\n");
	return (unsigned long)hold_residual;
}
Esempio n. 20
0
static int lgx_alloc_buffer(struct inno_lgx *lgx)
{
	//int i,j;
	//int page_num;
	//struct page *page = NULL;
	int i;
	struct inno_buffer *inno_buf = &lgx->inno_buffer;
	//int ret = 0;

	memset(inno_buf, 0, sizeof(struct inno_buffer));
	sema_init(&inno_buf->sem,1);
	down(&inno_buf->sem);

	// alloc buffer
#if 0                        //xingyu buffer issue
	page_num = PAGE_ALIGN(INNO_BUFFER_SIZE) / PAGE_SIZE;
	inno_buf->pages = (struct page **)kzalloc(page_num * sizeof(struct page *), GFP_KERNEL);         
	if (!inno_buf->pages) {
		inno_err("lgx_alloc_buffer:No enough memory");
		ret = -ENOMEM;
		goto alloc_node_error;
	}

	for (i = 0; i < page_num; i++) {
		page = alloc_page(GFP_KERNEL);

		if (!page) {
			inno_err("lgx_alloc_buffer:No enough page");
			ret = -ENOMEM;
			goto alloc_pages_error;
		}
		//SetPageReserved(page);
		inno_buf->pages[i] = page;
	}

	inno_buf->page_num = page_num;

	inno_buf->bufsize = page_num * PAGE_SIZE;
	inno_buf->vaddr = vmap(inno_buf->pages, page_num, VM_MAP, PAGE_KERNEL);

	/* check if the memory map is OK. */
	if (!inno_buf->vaddr) {
		inno_err("lgx_alloc_buffer:vmap() failure");
		ret = -EFAULT;
		goto vmap_error;
	}

	memset(inno_buf->vaddr, 0, inno_buf->bufsize);

	inno_buf->start = inno_buf->vaddr;
#else	
#ifndef _buffer_global                                                      // buffer alloc modify xingyu 0714
	inno_buf->vaddr = kmalloc(INNO_BUFFER_SIZE, GFP_KERNEL | GFP_DMA);
	if (inno_buf->vaddr == NULL || ((unsigned long)inno_buf->vaddr % 4) != 0) {
		inno_err("inno_buf->vaddr kmalloc fail");
		up(&inno_buf->sem);
		return -1;
	}
#else
	for (i = 0; lgx_ids_table[i].name != NULL; i++) {
		if (lgx->ids == &lgx_ids_table[i]) {
			inno_buf->vaddr = i * INNO_BUFFER_SIZE + g_inno_buffer;
			inno_msg("use global mem");
			break;
		}
	}
#endif
	inno_buf->start = inno_buf->vaddr;
	inno_buf->bufsize = INNO_BUFFER_SIZE;
	
#endif
	up(&inno_buf->sem);

	return 0;

#if 0                          //xingyu buffer issue
	//vmap_error:
	//alloc_pages_error:
	for (j = 0; j < i; j++) {
		page = inno_buf->pages[j];
		//ClearPageReserved(page);
		__free_page(page);
	}
	kfree(inno_buf->pages);
	//alloc_node_error:
	return ret;
#endif
}
Esempio n. 21
0
/*
 static void mlx4_unmap_uar(struct mlx4_priv *priv)
 {
 struct mlx4_priv *priv = mlx4_priv(&priv->dev);
 int i;

 for (i = 0; i < mlx4_num_eq_uar(&priv->dev); ++i)
 if (priv->eq_table.uar_map[i]) {
 iounmap(priv->eq_table.uar_map[i]);
 priv->eq_table.uar_map[i] = NULL;
 }
 }
 */
static int mlx4_create_eq(struct mlx4_priv *priv, int nent, u8 intr,
		struct mlx4_eq *eq) {
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_eq_context *eq_context;
	int npages;
	u64 *dma_list = NULL;
	genpaddr_t t = 0;
	u64 mtt_addr;
	int err = -ENOMEM;
	int i;

	eq->priv = priv;
	eq->nent = roundup_pow_of_two(max(nent, 2));
	/* CX3 is capable of extending the CQE\EQE from 32 to 64 bytes*/
	npages = PAGE_ALIGN(
			eq->nent * (MLX4_EQ_ENTRY_SIZE << priv->dev.caps.eqe_factor))
			/ BASE_PAGE_SIZE;

	eq->page_list = malloc(npages * sizeof *eq->page_list);
	if (!eq->page_list)
		goto err_out;

	for (i = 0; i < npages; ++i)
		eq->page_list[i].buf = NULL;

	dma_list = malloc(npages * sizeof *dma_list);
	if (!dma_list)
		goto err_out_free;

	mailbox = mlx4_alloc_cmd_mailbox();
	if (IS_ERR(mailbox))
		goto err_out_free;
	eq_context = mailbox->buf;

	for (i = 0; i < npages; ++i) {
		eq->page_list[i].buf = dma_alloc(BASE_PAGE_SIZE, &t);
		if (!eq->page_list[i].buf)
			goto err_out_free_pages;

		dma_list[i] = t;
		eq->page_list[i].map = t;

		memset(eq->page_list[i].buf, 0, BASE_PAGE_SIZE);
	}

	eq->eqn = mlx4_bitmap_alloc(&priv->eq_table.bitmap);
	if (eq->eqn == -1)
		goto err_out_free_pages;

	eq->doorbell = mlx4_get_eq_uar(priv, eq);
	if (!eq->doorbell) {
		err = -ENOMEM;
		goto err_out_free_eq;
	}

	err = mlx4_mtt_init(&priv->dev, npages, PAGE_SHIFT, &eq->mtt);
	if (err)
		goto err_out_free_eq;

	err = mlx4_write_mtt(&priv->dev, &eq->mtt, 0, npages, dma_list);
	if (err)
		goto err_out_free_mtt;

	memset(eq_context, 0, sizeof *eq_context);
	eq_context->flags = cpu_to_be32(MLX4_EQ_STATUS_OK |
					MLX4_EQ_STATE_ARMED);
	eq_context->log_eq_size = ilog2(eq->nent);
	eq_context->intr = intr;
	eq_context->log_page_size = PAGE_SHIFT - MLX4_ICM_PAGE_SHIFT;

	/*printf("mtt_addr: %lx\n", mlx4_mtt_addr(&priv->dev, &eq->mtt));
	 printf("off: %d\n", eq->mtt.offset);
	 printf("size: %d\n", priv->dev.caps.mtt_entry_sz);*/

	mtt_addr = mlx4_mtt_addr(&priv->dev, &eq->mtt);
	eq_context->mtt_base_addr_h = mtt_addr >> 32;
	eq_context->mtt_base_addr_l = cpu_to_be32(mtt_addr & 0xffffffff);

	err = mlx4_SW2HW_EQ(priv, mailbox, eq->eqn);
	if (err) {
		MLX4_DEBUG("SW2HW_EQ failed (%d)\n", err);
		goto err_out_free_mtt;
	}

	free(dma_list);
	mlx4_free_cmd_mailbox(mailbox);

	eq->cons_index = 0;

	return err;

	/*TODO*/
	err_out_free_mtt: /*mlx4_mtt_cleanup(&priv->dev, &eq->mtt);*/

	err_out_free_eq: /*mlx4_bitmap_free(&priv->eq_table.bitmap, eq->eqn,
	 MLX4_USE_RR);*/

	err_out_free_pages: /*for (i = 0; i < npages; ++i)
	 if (eq->page_list[i].buf)
	 dma_free(&priv->dev.pdev->dev, PAGE_SIZE,
	 eq->page_list[i].buf, eq->page_list[i].map);*/

	mlx4_free_cmd_mailbox(mailbox);

	err_out_free: free(eq->page_list);
	free(dma_list);

	err_out: return err;
}
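As a worked example of the npages computation above (assuming 4 KiB base pages and the base 32-byte EQE mentioned in the comment, i.e. eqe_factor == 0): a request for nent = 2048 entries needs 2048 * 32 = 65,536 bytes, PAGE_ALIGN leaves that unchanged, and dividing by BASE_PAGE_SIZE yields 16 pages for the event queue.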
Esempio n. 22
0
long lib_ring_buffer_compat_ioctl(struct file *filp, unsigned int cmd,
		unsigned long arg, struct lib_ring_buffer *buf)
{
	struct channel *chan = buf->backend.chan;
	const struct lib_ring_buffer_config *config = &chan->backend.config;

	if (lib_ring_buffer_channel_is_disabled(chan))
		return -EIO;

	switch (cmd) {
	case RING_BUFFER_COMPAT_SNAPSHOT:
		return lib_ring_buffer_snapshot(buf, &buf->cons_snapshot,
						&buf->prod_snapshot);
	case RING_BUFFER_COMPAT_SNAPSHOT_SAMPLE_POSITIONS:
		return lib_ring_buffer_snapshot_sample_positions(buf,
				&buf->cons_snapshot, &buf->prod_snapshot);
	case RING_BUFFER_COMPAT_SNAPSHOT_GET_CONSUMED:
		return compat_put_ulong(buf->cons_snapshot, arg);
	case RING_BUFFER_COMPAT_SNAPSHOT_GET_PRODUCED:
		return compat_put_ulong(buf->prod_snapshot, arg);
	case RING_BUFFER_COMPAT_GET_SUBBUF:
	{
		__u32 uconsume;
		unsigned long consume;
		long ret;

		ret = get_user(uconsume, (__u32 __user *) arg);
		if (ret)
			return ret; /* will return -EFAULT */
		consume = buf->cons_snapshot;
		consume &= ~0xFFFFFFFFL;
		consume |= uconsume;
		ret = lib_ring_buffer_get_subbuf(buf, consume);
		if (!ret) {
			/* Set file position to zero at each successful "get" */
			filp->f_pos = 0;
		}
		return ret;
	}
	case RING_BUFFER_COMPAT_PUT_SUBBUF:
		lib_ring_buffer_put_subbuf(buf);
		return 0;

	case RING_BUFFER_COMPAT_GET_NEXT_SUBBUF:
	{
		long ret;

		ret = lib_ring_buffer_get_next_subbuf(buf);
		if (!ret) {
			/* Set file position to zero at each successful "get" */
			filp->f_pos = 0;
		}
		return ret;
	}
	case RING_BUFFER_COMPAT_PUT_NEXT_SUBBUF:
		lib_ring_buffer_put_next_subbuf(buf);
		return 0;
	case RING_BUFFER_COMPAT_GET_SUBBUF_SIZE:
	{
		unsigned long data_size;

		data_size = lib_ring_buffer_get_read_data_size(config, buf);
		if (data_size > UINT_MAX)
			return -EFBIG;
		return compat_put_ulong(data_size, arg);
	}
	case RING_BUFFER_COMPAT_GET_PADDED_SUBBUF_SIZE:
	{
		unsigned long size;

		size = lib_ring_buffer_get_read_data_size(config, buf);
		size = PAGE_ALIGN(size);
		if (size > UINT_MAX)
			return -EFBIG;
		return compat_put_ulong(size, arg);
	}
	case RING_BUFFER_COMPAT_GET_MAX_SUBBUF_SIZE:
		if (chan->backend.subbuf_size > UINT_MAX)
			return -EFBIG;
		return compat_put_ulong(chan->backend.subbuf_size, arg);
	case RING_BUFFER_COMPAT_GET_MMAP_LEN:
	{
		unsigned long mmap_buf_len;

		if (config->output != RING_BUFFER_MMAP)
			return -EINVAL;
		mmap_buf_len = chan->backend.buf_size;
		if (chan->backend.extra_reader_sb)
			mmap_buf_len += chan->backend.subbuf_size;
		if (mmap_buf_len > UINT_MAX)
			return -EFBIG;
		return compat_put_ulong(mmap_buf_len, arg);
	}
	case RING_BUFFER_COMPAT_GET_MMAP_READ_OFFSET:
	{
		unsigned long sb_bindex, read_offset;

		if (config->output != RING_BUFFER_MMAP)
			return -EINVAL;
		sb_bindex = subbuffer_id_get_index(config,
						   buf->backend.buf_rsb.id);
		read_offset = buf->backend.array[sb_bindex]->mmap_offset;
		if (read_offset > UINT_MAX)
			return -EINVAL;
		return compat_put_ulong(read_offset, arg);
	}
	case RING_BUFFER_COMPAT_FLUSH:
		lib_ring_buffer_switch_remote(buf);
		return 0;
	case RING_BUFFER_COMPAT_FLUSH_EMPTY:
		lib_ring_buffer_switch_remote_empty(buf);
		return 0;
	default:
		return -ENOIOCTLCMD;
	}
}
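A standalone sketch (illustrative values only) of the 32-bit merge done in the RING_BUFFER_COMPAT_GET_SUBBUF case above: the compat caller supplies only the low 32 bits of the consumer position, and the kernel keeps the upper bits from its own cons_snapshot.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t cons_snapshot = 0x300001000ULL; /* example kernel-side snapshot */
	uint32_t uconsume      = 0x00002000U;    /* example 32-bit user value    */

	uint64_t consume = cons_snapshot;
	consume &= ~(uint64_t)0xFFFFFFFF;   /* keep bits above 32 from snapshot */
	consume |= uconsume;                /* take the low 32 bits from user   */

	/* prints: consume=0x300002000 */
	printf("consume=0x%" PRIx64 "\n", consume);
	return 0;
}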
static int build_via_table(struct viadev *dev, struct snd_pcm_substream *substream,
			   struct pci_dev *pci,
			   unsigned int periods, unsigned int fragsize)
{
	unsigned int i, idx, ofs, rest;
	struct via82xx_modem *chip = snd_pcm_substream_chip(substream);

	if (dev->table.area == NULL) {
		/* allocate the descriptor table once; each entry holds two
		 * 32-bit words (buffer address and size/flag)
		 */
		if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(chip->pci),
					PAGE_ALIGN(VIA_TABLE_SIZE * 2 * 8),
					&dev->table) < 0)
			return -ENOMEM;
	}
	if (! dev->idx_table) {
		dev->idx_table = kmalloc(sizeof(*dev->idx_table) * VIA_TABLE_SIZE, GFP_KERNEL);
		if (! dev->idx_table)
			return -ENOMEM;
	}

	/* fill the descriptor entries */
	idx = 0;
	ofs = 0;
	for (i = 0; i < periods; i++) {
		rest = fragsize;
		/* fill the descriptors for one period; a period may be
		 * split into several entries so that no entry crosses a
		 * page boundary
		 */
		do {
			unsigned int r;
			unsigned int flag;
			unsigned int addr;

			if (idx >= VIA_TABLE_SIZE) {
				snd_printk(KERN_ERR "via82xx: too much table size!\n");
				return -EINVAL;
			}
			addr = snd_pcm_sgbuf_get_addr(substream, ofs);
			((u32 *)dev->table.area)[idx << 1] = cpu_to_le32(addr);
			r = PAGE_SIZE - (ofs % PAGE_SIZE);
			if (rest < r)
				r = rest;
			rest -= r;
			if (! rest) {
				if (i == periods - 1)
					flag = VIA_TBL_BIT_EOL; /* end of list */
				else
					flag = VIA_TBL_BIT_FLAG; /* period boundary */
			} else
				flag = 0; /* chunk continues within the same period */
			/* write the size and flag word for this entry */
			((u32 *)dev->table.area)[(idx<<1) + 1] = cpu_to_le32(r | flag);
			dev->idx_table[idx].offset = ofs;
			dev->idx_table[idx].size = r;
			ofs += r;
			idx++;
		} while (rest > 0);
	}
	dev->tbl_entries = idx;
	dev->bufsize = periods * fragsize;
	dev->bufsize2 = dev->bufsize / 2;
	return 0;
}
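A standalone sketch (example values only, 4 KiB pages assumed) of the page-boundary split performed by the inner do/while loop above: a period that starts in the middle of a page is broken into one entry filling the rest of that page followed by page-sized entries, so no single descriptor crosses a page.

#include <stdio.h>

#define PAGE_SIZE 4096U   /* assumption: 4 KiB pages */

int main(void)
{
	unsigned int ofs  = 0x0800;  /* example: period starts mid-page */
	unsigned int rest = 0x1800;  /* example period size: 6 KiB      */

	while (rest > 0) {
		unsigned int r = PAGE_SIZE - (ofs % PAGE_SIZE); /* room left in page */
		if (rest < r)
			r = rest;
		printf("entry: offset=%#x size=%#x\n", ofs, r);
		ofs  += r;
		rest -= r;
	}
	/* prints two entries: size 0x800, then size 0x1000 */
	return 0;
}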
void  __init
swiotlb_init(int verbose)
{
	size_t default_size = IO_TLB_DEFAULT_SIZE;
	unsigned char *vstart;
	unsigned long bytes;

	if (!io_tlb_nslabs) {
		io_tlb_nslabs = (default_size >> IO_TLB_SHIFT);
		io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
	}

	bytes = io_tlb_nslabs << IO_TLB_SHIFT;

	/* Get IO TLB memory from the low pages */
	vstart = alloc_bootmem_low_pages_nopanic(PAGE_ALIGN(bytes));
	if (vstart && !swiotlb_init_with_tbl(vstart, io_tlb_nslabs, verbose))
		return;

	if (io_tlb_start)
		free_bootmem(io_tlb_start,
				 PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT));
	pr_warn("Cannot allocate SWIOTLB buffer");
	no_iotlb_memory = true;
}

/*
 * Systems with larger DMA zones (those that don't support ISA) can
 * initialize the swiotlb later using the slab allocator if needed.
 * This should be just like above, but with some error catching.
 */
Esempio n. 25
0
NTSTATUS
NtFreeVirtualMemory(
    __in HANDLE ProcessHandle,
    __inout PVOID *BaseAddress,
    __inout PSIZE_T RegionSize,
    __in ULONG FreeType
)

/*++

Routine Description:

    This function deletes a region of pages within the virtual address
    space of a subject process.

Arguments:

    ProcessHandle - An open handle to a process object.

    BaseAddress - The base address of the region of pages
                  to be freed. This value is rounded down to the
                  next host page address boundary.

    RegionSize - A pointer to a variable that will receive
                 the actual size in bytes of the freed region of
                 pages. The initial value of this argument is
                 rounded up to the next host page size boundary.

    FreeType - A set of flags that describe the type of
               free that is to be performed for the specified
               region of pages.

       FreeType Flags

        MEM_DECOMMIT - The specified region of pages is to be decommitted.

        MEM_RELEASE - The specified region of pages is to be released.

Return Value:

    NTSTATUS.

--*/

{
    KAPC_STATE ApcState;
    PMMVAD_SHORT Vad;
    PMMVAD_SHORT NewVad;
    PMMVAD PreviousVad;
    PMMVAD NextVad;
    PMMVAD ChargedVad;
    PEPROCESS Process;
    KPROCESSOR_MODE PreviousMode;
    PVOID StartingAddress;
    PVOID EndingAddress;
    NTSTATUS Status;
    LOGICAL Attached;
    SIZE_T CapturedRegionSize;
    PVOID CapturedBase;
    PMMPTE StartingPte;
    PMMPTE EndingPte;
    SIZE_T OldQuota;
    SIZE_T QuotaCharge;
    SIZE_T CommitReduction;
    LOGICAL UserPhysicalPages;
    PETHREAD CurrentThread;
    PEPROCESS CurrentProcess;

    PAGED_CODE();

    //
    // Check to make sure FreeType is good.
    //

    if ((FreeType & ~(MEM_DECOMMIT | MEM_RELEASE)) != 0) {
        return STATUS_INVALID_PARAMETER_4;
    }

    //
    // One of MEM_DECOMMIT or MEM_RELEASE must be specified, but not both.
    //

    if (((FreeType & (MEM_DECOMMIT | MEM_RELEASE)) == 0) ||
            ((FreeType & (MEM_DECOMMIT | MEM_RELEASE)) ==
             (MEM_DECOMMIT | MEM_RELEASE))) {
        return STATUS_INVALID_PARAMETER_4;
    }
    CurrentThread = PsGetCurrentThread ();

    CurrentProcess = PsGetCurrentProcessByThread (CurrentThread);

    PreviousMode = KeGetPreviousModeByThread(&CurrentThread->Tcb);

    //
    // Establish an exception handler, probe the specified addresses
    // for write access and capture the initial values.
    //

    try {

        if (PreviousMode != KernelMode) {

            ProbeForWritePointer (BaseAddress);
            ProbeForWriteUlong_ptr (RegionSize);
        }

        //
        // Capture the base address.
        //

        CapturedBase = *BaseAddress;

        //
        // Capture the region size.
        //

        CapturedRegionSize = *RegionSize;

    }
    except (ExSystemExceptionFilter ()) {

        //
        // If an exception occurs during the probe or capture
        // of the initial values, then handle the exception and
        // return the exception code as the status value.
        //

        return GetExceptionCode ();
    }

    //
    // Make sure the specified starting and ending addresses are
    // within the user part of the virtual address space.
    //

    if (CapturedBase > MM_HIGHEST_USER_ADDRESS) {

        //
        // Invalid base address.
        //

        return STATUS_INVALID_PARAMETER_2;
    }

    if ((ULONG_PTR)MM_HIGHEST_USER_ADDRESS - (ULONG_PTR)CapturedBase <
            CapturedRegionSize) {

        //
        // Invalid region size;
        //

        return STATUS_INVALID_PARAMETER_3;

    }

    EndingAddress = (PVOID)(((LONG_PTR)CapturedBase + CapturedRegionSize - 1) |
                            (PAGE_SIZE - 1));

    StartingAddress = PAGE_ALIGN(CapturedBase);

    Attached = FALSE;

    if (ProcessHandle == NtCurrentProcess()) {
        Process = CurrentProcess;
    }
    else {

        //
        // Reference the specified process handle for VM_OPERATION access.
        //

        Status = ObReferenceObjectByHandle (ProcessHandle,
                                            PROCESS_VM_OPERATION,
                                            PsProcessType,
                                            PreviousMode,
                                            (PVOID *)&Process,
                                            NULL);

        if (!NT_SUCCESS(Status)) {
            return Status;
        }

        //
        // If the specified process is not the current process, attach
        // to the specified process.
        //

        if (CurrentProcess != Process) {
            KeStackAttachProcess (&Process->Pcb, &ApcState);
            Attached = TRUE;
        }
    }

    CommitReduction = 0;

    //
    // Get the address creation mutex to block multiple threads from
    // creating or deleting address space at the same time and
    // get the working set mutex so virtual address descriptors can
    // be inserted and walked.  Block APCs to prevent page faults while
    // we own the working set mutex.
    //

    LOCK_ADDRESS_SPACE (Process);

    //
    // Make sure the address space was not deleted.
    //

    if (Process->Flags & PS_PROCESS_FLAGS_VM_DELETED) {
        Status = STATUS_PROCESS_IS_TERMINATING;
        goto ErrorReturn;
    }

    Vad = (PMMVAD_SHORT) MiLocateAddress (StartingAddress);

    if (Vad == NULL) {

        //
        // No Virtual Address Descriptor located for Base Address.
        //

        Status = STATUS_MEMORY_NOT_ALLOCATED;
        goto ErrorReturn;
    }

    //
    // Found the associated Virtual Address Descriptor.
    //

    if (Vad->EndingVpn < MI_VA_TO_VPN (EndingAddress)) {

        //
        // The entire range to delete is not contained within a single
        // virtual address descriptor.  Return an error.
        //

        Status = STATUS_UNABLE_TO_FREE_VM;
        goto ErrorReturn;
    }

    //
    // Check to ensure this Vad is deletable.  Delete is required
    // for both decommit and release.
    //

    if (((Vad->u.VadFlags.PrivateMemory == 0) &&
            (Vad->u.VadFlags.VadType != VadRotatePhysical))
            ||
            (Vad->u.VadFlags.VadType == VadDevicePhysicalMemory)) {

        Status = STATUS_UNABLE_TO_DELETE_SECTION;
        goto ErrorReturn;
    }

    if (Vad->u.VadFlags.NoChange == 1) {

        //
        // An attempt is being made to delete a secured VAD, check
        // to see if this deletion is allowed.
        //

        if (FreeType & MEM_RELEASE) {

            //
            // Specify the whole range, this solves the problem with
            // splitting the VAD and trying to decide where the various
            // secure ranges need to go.
            //

            Status = MiCheckSecuredVad ((PMMVAD)Vad,
                                        MI_VPN_TO_VA (Vad->StartingVpn),
                                        ((Vad->EndingVpn - Vad->StartingVpn) << PAGE_SHIFT) +
                                        PAGE_SIZE,
                                        MM_SECURE_DELETE_CHECK);

        }
        else {
            Status = MiCheckSecuredVad ((PMMVAD)Vad,
                                        CapturedBase,
                                        CapturedRegionSize,
                                        MM_SECURE_DELETE_CHECK);
        }
        if (!NT_SUCCESS (Status)) {
            goto ErrorReturn;
        }
    }

    UserPhysicalPages = FALSE;
    ChargedVad = NULL;

    PreviousVad = MiGetPreviousVad (Vad);
    NextVad = MiGetNextVad (Vad);

    if (FreeType & MEM_RELEASE) {

        //
        // *****************************************************************
        // MEM_RELEASE was specified.
        // *****************************************************************
        //

        //
        // The descriptor for the address range is deletable.  Remove or split
        // the descriptor.
        //

        //
        // If the region size is zero, remove the whole VAD.
        //

        if (CapturedRegionSize == 0) {

            //
            // If the region size is specified as 0, the base address
            // must be the starting address for the region.
            //

            if (MI_VA_TO_VPN (CapturedBase) != Vad->StartingVpn) {
                Status = STATUS_FREE_VM_NOT_AT_BASE;
                goto ErrorReturn;
            }

            //
            // This Virtual Address Descriptor has been deleted.
            //

            StartingAddress = MI_VPN_TO_VA (Vad->StartingVpn);
            EndingAddress = MI_VPN_TO_VA_ENDING (Vad->EndingVpn);

            if (Vad->u.VadFlags.VadType == VadRotatePhysical) {
                Status = MiUnmapViewOfSection (Process,
                                               CapturedBase,
                                               UNMAP_ADDRESS_SPACE_HELD | UNMAP_ROTATE_PHYSICAL_OK);
                ASSERT (CommitReduction == 0);
                Vad = NULL;
                CapturedRegionSize = 1 + (PCHAR)EndingAddress - (PCHAR)StartingAddress;
                goto AllDone;
            }

            //
            // Free all the physical pages that this VAD might be mapping.
            //

            if (Vad->u.VadFlags.VadType == VadLargePages) {

                MiAweViewRemover (Process, (PMMVAD)Vad);

                MiReleasePhysicalCharges (Vad->EndingVpn - Vad->StartingVpn + 1,
                                          Process);

                LOCK_WS_UNSAFE (CurrentThread, Process);

                MiFreeLargePages (MI_VPN_TO_VA (Vad->StartingVpn),
                                  MI_VPN_TO_VA_ENDING (Vad->EndingVpn),
                                  FALSE);
            }
            else if (Vad->u.VadFlags.VadType == VadAwe) {
                MiAweViewRemover (Process, (PMMVAD)Vad);
                MiRemoveUserPhysicalPagesVad (Vad);
                UserPhysicalPages = TRUE;
                LOCK_WS_UNSAFE (CurrentThread, Process);
            }
            else if (Vad->u.VadFlags.VadType == VadWriteWatch) {
                LOCK_WS_UNSAFE (CurrentThread, Process);
                MiPhysicalViewRemover (Process, (PMMVAD)Vad);
            }
            else {
                LOCK_WS_UNSAFE (CurrentThread, Process);
            }

            ChargedVad = (PMMVAD)Vad;

            MiRemoveVad ((PMMVAD)Vad, Process);

            //
            // Free the VAD pool and release quota after releasing our mutexes
            // to reduce contention.
            //
        }
        else {

            //
            // Region's size was not specified as zero, delete the
            // whole VAD or split the VAD.
            //

            if (MI_VA_TO_VPN (StartingAddress) == Vad->StartingVpn) {
                if (MI_VA_TO_VPN (EndingAddress) == Vad->EndingVpn) {

                    //
                    // This Virtual Address Descriptor has been deleted.
                    //
                    // Free all the physical pages that this VAD might be
                    // mapping.
                    //

                    if (Vad->u.VadFlags.VadType == VadRotatePhysical) {
                        Status = MiUnmapViewOfSection (Process,
                                                       CapturedBase,
                                                       UNMAP_ADDRESS_SPACE_HELD | UNMAP_ROTATE_PHYSICAL_OK);
                        ASSERT (CommitReduction == 0);
                        Vad = NULL;
                        CapturedRegionSize = 1 + (PCHAR)EndingAddress - (PCHAR)StartingAddress;
                        goto AllDone;
                    }

                    if (Vad->u.VadFlags.VadType == VadLargePages) {

                        MiAweViewRemover (Process, (PMMVAD)Vad);

                        MiReleasePhysicalCharges (Vad->EndingVpn - Vad->StartingVpn + 1,
                                                  Process);

                        LOCK_WS_UNSAFE (CurrentThread, Process);

                        MiFreeLargePages (MI_VPN_TO_VA (Vad->StartingVpn),
                                          MI_VPN_TO_VA_ENDING (Vad->EndingVpn),
                                          FALSE);
                    }
                    else if (Vad->u.VadFlags.VadType == VadAwe) {
                        MiAweViewRemover (Process, (PMMVAD)Vad);
                        MiRemoveUserPhysicalPagesVad (Vad);
                        UserPhysicalPages = TRUE;
                        LOCK_WS_UNSAFE (CurrentThread, Process);
                    }
                    else if (Vad->u.VadFlags.VadType == VadWriteWatch) {
                        LOCK_WS_UNSAFE (CurrentThread, Process);
                        MiPhysicalViewRemover (Process, (PMMVAD)Vad);
                    }
                    else {
                        LOCK_WS_UNSAFE (CurrentThread, Process);
                    }

                    ChargedVad = (PMMVAD)Vad;

                    MiRemoveVad ((PMMVAD)Vad, Process);

                    //
                    // Free the VAD pool after releasing our mutexes
                    // to reduce contention.
                    //
                }
                else {

                    if ((Vad->u.VadFlags.VadType == VadAwe) ||
                            (Vad->u.VadFlags.VadType == VadLargePages) ||
                            (Vad->u.VadFlags.VadType == VadRotatePhysical) ||
                            (Vad->u.VadFlags.VadType == VadWriteWatch)) {

                        //
                        // Splitting or chopping a physical VAD, large page VAD
                        // or a write-watch VAD is not allowed.
                        //

                        Status = STATUS_FREE_VM_NOT_AT_BASE;
                        goto ErrorReturn;
                    }

                    LOCK_WS_UNSAFE (CurrentThread, Process);

                    //
                    // This Virtual Address Descriptor has a new starting
                    // address.
                    //

                    CommitReduction = MiCalculatePageCommitment (
                                          StartingAddress,
                                          EndingAddress,
                                          (PMMVAD)Vad,
                                          Process);

                    Vad->StartingVpn = MI_VA_TO_VPN ((PCHAR)EndingAddress + 1);
                    Vad->u.VadFlags.CommitCharge -= CommitReduction;
                    ASSERT ((SSIZE_T)Vad->u.VadFlags.CommitCharge >= 0);
                    NextVad = (PMMVAD)Vad;
                    Vad = NULL;
                }
            }
            else {

                if ((Vad->u.VadFlags.VadType == VadAwe) ||
                        (Vad->u.VadFlags.VadType == VadLargePages) ||
                        (Vad->u.VadFlags.VadType == VadRotatePhysical) ||
                        (Vad->u.VadFlags.VadType == VadWriteWatch)) {

                    //
                    // Splitting or chopping a physical VAD, large page VAD
                    // or a write-watch VAD is not allowed.
                    //

                    Status = STATUS_FREE_VM_NOT_AT_BASE;
                    goto ErrorReturn;
                }

                //
                // Starting address is greater than start of VAD.
                //

                if (MI_VA_TO_VPN (EndingAddress) == Vad->EndingVpn) {

                    //
                    // Change the ending address of the VAD.
                    //

                    LOCK_WS_UNSAFE (CurrentThread, Process);

                    CommitReduction = MiCalculatePageCommitment (
                                          StartingAddress,
                                          EndingAddress,
                                          (PMMVAD)Vad,
                                          Process);

                    Vad->u.VadFlags.CommitCharge -= CommitReduction;

                    Vad->EndingVpn = MI_VA_TO_VPN ((PCHAR)StartingAddress - 1);
                    PreviousVad = (PMMVAD)Vad;
                }
                else {

                    //
                    // Split this VAD as the address range is within the VAD.
                    //

                    NewVad = ExAllocatePoolWithTag (NonPagedPool,
                                                    sizeof(MMVAD_SHORT),
                                                    'FdaV');

                    if (NewVad == NULL) {
                        Status = STATUS_INSUFFICIENT_RESOURCES;
                        goto ErrorReturn;
                    }

                    *NewVad = *Vad;

                    NewVad->StartingVpn = MI_VA_TO_VPN ((PCHAR)EndingAddress + 1);
                    //
                    // Set the commit charge to zero so MiInsertVad will
                    // not charge commitment for splitting the VAD.
                    //

                    NewVad->u.VadFlags.CommitCharge = 0;

                    //
                    // Insert the VAD, this could fail due to quota charges.
                    //

                    Status = MiInsertVadCharges ((PMMVAD)NewVad, Process);

                    if (!NT_SUCCESS(Status)) {

                        //
                        // The quota charging failed, free the new VAD
                        // and return an error.
                        //

                        UNLOCK_ADDRESS_SPACE (Process);
                        ExFreePool (NewVad);
                        goto ErrorReturn2;
                    }

                    LOCK_WS_UNSAFE (CurrentThread, Process);

                    CommitReduction = MiCalculatePageCommitment (
                                          StartingAddress,
                                          EndingAddress,
                                          (PMMVAD)Vad,
                                          Process);

                    OldQuota = Vad->u.VadFlags.CommitCharge - CommitReduction;

                    Vad->EndingVpn = MI_VA_TO_VPN ((PCHAR)StartingAddress - 1);

                    MiInsertVad ((PMMVAD)NewVad, Process);

                    //
                    // As we have split the original VAD into 2 separate VADs
                    // there is no way of knowing what the commit charge
                    // is for each VAD.  Calculate the charge and reset
                    // each VAD.  Note that we also use the previous value
                    // to make sure the books stay balanced.
                    //

                    QuotaCharge = MiCalculatePageCommitment (MI_VPN_TO_VA (Vad->StartingVpn),
                                  (PCHAR)StartingAddress - 1,
                                  (PMMVAD)Vad,
                                  Process);

                    Vad->u.VadFlags.CommitCharge = QuotaCharge;

                    //
                    // Give the remaining charge to the new VAD.
                    //

                    NewVad->u.VadFlags.CommitCharge = OldQuota - QuotaCharge;
                    PreviousVad = (PMMVAD)Vad;
                    NextVad = (PMMVAD)NewVad;
                }
                Vad = NULL;
            }
        }

        if (UserPhysicalPages == TRUE) {
            MiDeletePageTablesForPhysicalRange (StartingAddress, EndingAddress);
        }
        else {

            MiDeleteVirtualAddresses (StartingAddress,
                                      EndingAddress,
                                      NULL);
        }

        UNLOCK_WS_UNSAFE (CurrentThread, Process);

        //
        // Return commitment for page table pages if possible.
        //

        MiReturnPageTablePageCommitment (StartingAddress,
                                         EndingAddress,
                                         Process,
                                         PreviousVad,
                                         NextVad);

        if (ChargedVad != NULL) {
            MiRemoveVadCharges (ChargedVad, Process);
        }

        CapturedRegionSize = 1 + (PCHAR)EndingAddress - (PCHAR)StartingAddress;

        //
        // Update the virtual size in the process header.
        //

        Process->VirtualSize -= CapturedRegionSize;

        Process->CommitCharge -= CommitReduction;
        Status = STATUS_SUCCESS;

AllDone:
        UNLOCK_ADDRESS_SPACE (Process);

        if (CommitReduction != 0) {

            MI_INCREMENT_TOTAL_PROCESS_COMMIT (0 - CommitReduction);

            ASSERT (Vad == NULL);
            PsReturnProcessPageFileQuota (Process, CommitReduction);
            MiReturnCommitment (CommitReduction);

            if (Process->JobStatus & PS_JOB_STATUS_REPORT_COMMIT_CHANGES) {
                PsChangeJobMemoryUsage (PS_JOB_STATUS_REPORT_COMMIT_CHANGES, -(SSIZE_T)CommitReduction);
            }

            MM_TRACK_COMMIT (MM_DBG_COMMIT_RETURN_NTFREEVM1, CommitReduction);
        }
        else if (Vad != NULL) {
            ExFreePool (Vad);
        }

        if (Attached == TRUE) {
            KeUnstackDetachProcess (&ApcState);
        }

        if (ProcessHandle != NtCurrentProcess ()) {
            ObDereferenceObject (Process);
        }
        //
        // Establish an exception handler and write the size and base
        // address.
        //

        try {

            *RegionSize = CapturedRegionSize;
            *BaseAddress = StartingAddress;

        }
        except (EXCEPTION_EXECUTE_HANDLER) {

            //
            // An exception occurred, don't take any action (just handle
            // the exception and return success).

        }

        return Status;
    }

    //
    // **************************************************************
    //
    // MEM_DECOMMIT was specified.
    //
    // **************************************************************
    //

    if (Vad->u.VadFlags.VadType == VadAwe) {

        //
        // Pages from a physical VAD must be released via
        // NtFreeUserPhysicalPages, not this routine.
        //

        Status = STATUS_MEMORY_NOT_ALLOCATED;
        goto ErrorReturn;
    }

    if ((Vad->u.VadFlags.VadType == VadLargePages) ||
            (Vad->u.VadFlags.VadType == VadRotatePhysical)) {

        //
        // Pages from a large page or rotate physical VAD must be released -
        // they cannot be merely decommitted.
        //

        Status = STATUS_MEMORY_NOT_ALLOCATED;
        goto ErrorReturn;
    }

    //
    // Check to ensure the complete range of pages is already committed.
    //

    if (CapturedRegionSize == 0) {

        if (MI_VA_TO_VPN (CapturedBase) != Vad->StartingVpn) {
            Status = STATUS_FREE_VM_NOT_AT_BASE;
            goto ErrorReturn;
        }
        EndingAddress = MI_VPN_TO_VA_ENDING (Vad->EndingVpn);
    }

    //
    // The address range is entirely committed, decommit it now.
    //

    //
    // Calculate the initial quotas and commit charges for this VAD.
    //

    StartingPte = MiGetPteAddress (StartingAddress);
    EndingPte = MiGetPteAddress (EndingAddress);

    CommitReduction = 1 + EndingPte - StartingPte;

    //
    // Check to see if the entire range can be decommitted by
    // just updating the virtual address descriptor.
    //

    CommitReduction -= MiDecommitPages (StartingAddress,
                                        EndingPte,
                                        Process,
                                        Vad);

    //
    // Adjust the quota charges.
    //

    ASSERT ((LONG)CommitReduction >= 0);

    Vad->u.VadFlags.CommitCharge -= CommitReduction;
    ASSERT ((LONG)Vad->u.VadFlags.CommitCharge >= 0);
    Vad = NULL;

    Process->CommitCharge -= CommitReduction;

    UNLOCK_ADDRESS_SPACE (Process);

    if (CommitReduction != 0) {

        MI_INCREMENT_TOTAL_PROCESS_COMMIT (0 - CommitReduction);

        PsReturnProcessPageFileQuota (Process, CommitReduction);
        MiReturnCommitment (CommitReduction);

        if (Process->JobStatus & PS_JOB_STATUS_REPORT_COMMIT_CHANGES) {
            PsChangeJobMemoryUsage (PS_JOB_STATUS_REPORT_COMMIT_CHANGES, -(SSIZE_T)CommitReduction);
        }

        MM_TRACK_COMMIT (MM_DBG_COMMIT_RETURN_NTFREEVM2, CommitReduction);
    }
    else if (Vad != NULL) {
        ExFreePool (Vad);
    }

    if (Attached == TRUE) {
        KeUnstackDetachProcess (&ApcState);
    }

    if (ProcessHandle != NtCurrentProcess()) {
        ObDereferenceObject (Process);
    }

    //
    // Establish an exception handler and write the size and base address.
    //

    try {

        *RegionSize = 1 + (PCHAR)EndingAddress - (PCHAR)StartingAddress;
        *BaseAddress = StartingAddress;

    }
    except (EXCEPTION_EXECUTE_HANDLER) {
        NOTHING;
    }

    return STATUS_SUCCESS;

ErrorReturn:
    UNLOCK_ADDRESS_SPACE (Process);

ErrorReturn2:
    if (Attached == TRUE) {
        KeUnstackDetachProcess (&ApcState);
    }

    if (ProcessHandle != NtCurrentProcess()) {
        ObDereferenceObject (Process);
    }
    return Status;
}
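A hedged call sketch (fragment only; assumes the usual native-API declarations, with AllocationBase a placeholder for an address previously returned by NtAllocateVirtualMemory): per the MEM_RELEASE path above, a RegionSize of zero releases the whole region, and in that case BaseAddress must equal the start of the allocation.

PVOID    base = AllocationBase;  /* placeholder: must be the region's start */
SIZE_T   size = 0;               /* 0 => release the entire region          */
NTSTATUS status;

status = NtFreeVirtualMemory(NtCurrentProcess(), &base, &size, MEM_RELEASE);
/* on success, base and size are written back with the range actually freed */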
Esempio n. 26
0
	unsigned long camera_size;
	unsigned long gralloc_size;
	unsigned long vpp_size;
#if defined(CONFIG_OVERLAY_COMPOSE)
	unsigned long overlay_size;
#endif
	unsigned long fb_size;
};

/*FHD&HD DTS:2013072204915 modifier: yuanfang y00241633 begin*/
extern int get_isfullhd();
void memset_for_fhd(void);
void memset_for_hd(void);
unsigned long HISI_FRAME_BUFFER_SIZE;
unsigned long HISI_MEM_FB_SIZE_NEW[2] = {
	PAGE_ALIGN(1280 * (STRIDE_ALIGN(ALIGN_UP((720 * 4), 64))) * 4),	 //14400k
	PAGE_ALIGN(38 * SZ_1M)
};

unsigned long HISI_MEM_GPU_SIZE_NEW[2] = {
	PAGE_ALIGN(SZ_16M),
	PAGE_ALIGN(SZ_16M)
};
unsigned long HISI_PMEM_CAMERA_SIZE_NEW[2] = {
	PAGE_ALIGN(4 * SZ_1K),
	PAGE_ALIGN(4 * SZ_1K)
};
unsigned long HISI_MEM_CODEC_SIZE_NEW[2] = {
	PAGE_ALIGN(52 * SZ_1M),
	PAGE_ALIGN(52 * SZ_1M)
};
static void set_tiler1d_slot_size(struct dsscomp_platform_data *dsscomp,
				  struct omap_android_display_data *mem)
{
	struct dsscomp_platform_data data = {
		.tiler1d_slotsz = 0,
	};

	if (dsscomp)
		data = *dsscomp;

	/* do not change board specified value if given */
	if (data.tiler1d_slotsz)
		goto done;

	/*
	 * 4 bytes per pixel, and ICS factor of 4.  The ICS factor
	 * is chosen somewhat arbitrarily to support the home screen layers
	 * to be displayed by DSS.  The size of the home screen layers is
	 * roughly (1 + 2.5 + 0.1 + 0.1) * size_of_the_screen
	 * for the icons, wallpaper, status bar and navigation bar.  Boards
	 * that wish to use a different factor should supply their tiler1D
	 * slot size directly.
	 */
	data.tiler1d_slotsz =
		PAGE_ALIGN(mem->width * mem->height * 4 * 4);

done:
	if (dsscomp)
		*dsscomp = data;
	dsscomp_set_platform_data(&data);

	/* remember setting for ion carveouts */
	mem->tiler1d_mem =
		NUM_ANDROID_TILER1D_SLOTS * data.tiler1d_slotsz;
	pr_info("android_display: tiler1d %u\n", mem->tiler1d_mem);
}
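As a worked example of the factor described in the comment above: for a hypothetical 1280 x 800 panel, width * height * 4 * 4 is 16,384,000 bytes, which happens to be an exact multiple of 4 KiB, so PAGE_ALIGN leaves it unchanged at roughly 15.6 MiB per tiler1D slot; NUM_ANDROID_TILER1D_SLOTS of these are then reserved via tiler1d_mem.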

static u32 vram_size(struct omap_android_display_data *mem)
{
	/* calculate required VRAM */
	return PAGE_ALIGN(ALIGN(mem->width, 64) * mem->height * mem->bpp);
}
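Similarly for vram_size() (again assuming 4 KiB pages): a hypothetical 1920 x 1080 screen at 4 bytes per pixel gives ALIGN(1920, 64) * 1080 * 4 = 8,294,400 bytes, already page-aligned, so each VRAM buffer counted in set_vram_sizes() below costs about 7.9 MiB.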

static void set_vram_sizes(struct sgx_omaplfb_config *sgx_config,
			   struct omapfb_platform_data *fb,
			   struct omap_android_display_data *mem)
{
	u32 num_vram_buffers = 0;
	u32 vram = 0;
	int i;

	if (fb && fb->mem_desc.region_cnt >= 1) {
		/* Need at least 1 VRAM buffer for fb0 */
		num_vram_buffers = 1;
	}

	if (sgx_config) {
		vram += sgx_config->vram_reserve;
		num_vram_buffers = max(sgx_config->vram_buffers,
#if defined(CONFIG_GCBV)
		/* Add 2 extra VRAM buffers for gc320 composition - 4470 only */
				       num_vram_buffers) + 2;
#else
				       num_vram_buffers);
#endif
	}

	vram += num_vram_buffers * vram_size(mem);

	if (fb) {
		/* set fb0 vram needs */
		if (fb->mem_desc.region_cnt >= 1) {
			fb->mem_desc.region[0].size = vram;
			pr_info("android_display: setting fb0.vram to %u\n",
									vram);
		}

		/* set global vram needs incl. additional regions specified */
		for (i = 1; i < fb->mem_desc.region_cnt; i++)
			if (!fb->mem_desc.region[i].paddr)
				vram += fb->mem_desc.region[i].size;
	}

	pr_info("android_display: setting vram to %u\n", vram);
	omap_vram_set_sdram_vram(vram, 0);
}
Esempio n. 28
0
void close_and_munmap(int fd, void *address, size_t size) {
        if (size > 0)
                assert_se(munmap(address, PAGE_ALIGN(size)) >= 0);

        safe_close(fd);
}
Esempio n. 29
0
void __iomem *
__ioremap_caller(phys_addr_t addr, unsigned long size, unsigned long flags,
		 void *caller)
{
	unsigned long v, i;
	phys_addr_t p;
	int err;

	/* Make sure we have the base flags */
	if ((flags & _PAGE_PRESENT) == 0)
		flags |= PAGE_KERNEL;

	/* Non-cacheable page cannot be coherent */
	if (flags & _PAGE_NO_CACHE)
		flags &= ~_PAGE_COHERENT;

	/*
	 * Choose an address to map it to.
	 * Once the vmalloc system is running, we use it.
	 * Before then, we use space going down from ioremap_base
	 * (ioremap_bot records where we're up to).
	 */
	p = addr & PAGE_MASK;
	size = PAGE_ALIGN(addr + size) - p;

	/*
	 * If the address lies within the first 16 MB, assume it's in ISA
	 * memory space
	 */
	if (p < 16*1024*1024)
		p += _ISA_MEM_BASE;

#ifndef CONFIG_CRASH_DUMP
	/*
	 * Don't allow anybody to remap normal RAM that we're using.
	 * mem_init() sets high_memory so only do the check after that.
	 */
	if (mem_init_done && (p < virt_to_phys(high_memory))) {
		printk("__ioremap(): phys addr 0x%llx is RAM lr %p\n",
		       (unsigned long long)p, __builtin_return_address(0));
		return NULL;
	}
#endif

	if (size == 0)
		return NULL;

	/*
	 * Is it already mapped?  Perhaps overlapped by a previous
	 * BAT mapping.  If the whole area is mapped then we're done,
	 * otherwise remap it since we want to keep the virt addrs for
	 * each request contiguous.
	 *
	 * We make the assumption here that if the bottom and top
	 * of the range we want are mapped then it's mapped to the
	 * same virt address (and this is contiguous).
	 *  -- Cort
	 */
	if ((v = p_mapped_by_bats(p)) /*&& p_mapped_by_bats(p+size-1)*/ )
		goto out;

	if ((v = p_mapped_by_tlbcam(p)))
		goto out;

	if (mem_init_done) {
		struct vm_struct *area;
		area = get_vm_area_caller(size, VM_IOREMAP, caller);
		if (area == 0)
			return NULL;
		v = (unsigned long) area->addr;
	} else {
		v = (ioremap_bot -= size);
	}

	/*
	 * Should check if it is a candidate for a BAT mapping
	 */

	err = 0;
	for (i = 0; i < size && err == 0; i += PAGE_SIZE)
		err = map_page(v+i, p+i, flags);
	if (err) {
		if (mem_init_done)
			vunmap((void *)v);
		return NULL;
	}

out:
	return (void __iomem *) (v + ((unsigned long)addr & ~PAGE_MASK));
}
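
The following userspace sketch illustrates only the page rounding done at the top of __ioremap_caller() and the sub-page offset added back at the end; PAGE_SIZE, PAGE_MASK, PAGE_ALIGN and the addresses are local stand-ins invented for the example.

#include <stdio.h>

#define PAGE_SIZE  4096UL
#define PAGE_MASK  (~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & PAGE_MASK)

int main(void)
{
	unsigned long addr = 0x48020100UL;	/* hypothetical, not page aligned */
	unsigned long size = 0x60;		/* 96-byte register window */

	unsigned long p = addr & PAGE_MASK;		 /* phys start rounded down */
	unsigned long len = PAGE_ALIGN(addr + size) - p; /* whole pages to map */
	unsigned long v = 0xd1000000UL;			 /* pretend mapping base */

	printf("map phys 0x%lx..0x%lx, return 0x%lx\n",
	       p, p + len - 1, v + (addr & ~PAGE_MASK));
	return 0;
}
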
Example n. 30
0
void
ia64_elf32_init (struct pt_regs *regs)
{
	struct vm_area_struct *vma;

	/*
	 * Map GDT below 4GB, where the processor can find it.  We need to map
	 * it with privilege level 3 because the IVE uses non-privileged accesses to these
	 * tables.  IA-32 segmentation is used to protect against IA-32 accesses to them.
	 */
	vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
	if (vma) {
		vma->vm_mm = current->mm;
		vma->vm_start = IA32_GDT_OFFSET;
		vma->vm_end = vma->vm_start + PAGE_SIZE;
		vma->vm_page_prot = PAGE_SHARED;
		vma->vm_flags = VM_READ|VM_MAYREAD|VM_RESERVED;
		vma->vm_ops = &ia32_shared_page_vm_ops;
		down_write(&current->mm->mmap_sem);
		{
			if (insert_vm_struct(current->mm, vma)) {
				kmem_cache_free(vm_area_cachep, vma);
				up_write(&current->mm->mmap_sem);
				BUG();
			}
		}
		up_write(&current->mm->mmap_sem);
	}

	/*
	 * When the user stack is not executable, pushing the sigreturn code onto
	 * the stack raises a segmentation fault when returning to the kernel.  So
	 * the sigreturn code is instead kept in a dedicated gate page, which
	 * pretcode points to when setup_frame_ia32() builds the signal frame.
	 */
	vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
	if (vma) {
		vma->vm_mm = current->mm;
		vma->vm_start = IA32_GATE_OFFSET;
		vma->vm_end = vma->vm_start + PAGE_SIZE;
		vma->vm_page_prot = PAGE_COPY_EXEC;
		vma->vm_flags = VM_READ | VM_MAYREAD | VM_EXEC
				| VM_MAYEXEC | VM_RESERVED;
		vma->vm_ops = &ia32_gate_page_vm_ops;
		down_write(&current->mm->mmap_sem);
		{
			if (insert_vm_struct(current->mm, vma)) {
				kmem_cache_free(vm_area_cachep, vma);
				up_write(&current->mm->mmap_sem);
				BUG();
			}
		}
		up_write(&current->mm->mmap_sem);
	}

	/*
	 * Install LDT as anonymous memory.  This gives us all-zero segment descriptors
	 * until a task modifies them via modify_ldt().
	 */
	vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
	if (vma) {
		vma->vm_mm = current->mm;
		vma->vm_start = IA32_LDT_OFFSET;
		vma->vm_end = vma->vm_start + PAGE_ALIGN(IA32_LDT_ENTRIES*IA32_LDT_ENTRY_SIZE);
		vma->vm_page_prot = PAGE_SHARED;
		vma->vm_flags = VM_READ|VM_WRITE|VM_MAYREAD|VM_MAYWRITE;
		down_write(&current->mm->mmap_sem);
		{
			if (insert_vm_struct(current->mm, vma)) {
				kmem_cache_free(vm_area_cachep, vma);
				up_write(&current->mm->mmap_sem);
				BUG();
			}
		}
		up_write(&current->mm->mmap_sem);
	}

	ia64_psr(regs)->ac = 0;		/* turn off alignment checking */
	regs->loadrs = 0;
	/*
	 *  According to the ABI %edx points to an `atexit' handler.  Since we don't have
	 *  one we'll set it to 0 and initialize all the other registers just to make
	 *  things more deterministic, ala the i386 implementation.
	 */
	regs->r8 = 0;	/* %eax */
	regs->r11 = 0;	/* %ebx */
	regs->r9 = 0;	/* %ecx */
	regs->r10 = 0;	/* %edx */
	regs->r13 = 0;	/* %ebp */
	regs->r14 = 0;	/* %esi */
	regs->r15 = 0;	/* %edi */

	current->thread.eflag = IA32_EFLAG;
	current->thread.fsr = IA32_FSR_DEFAULT;
	current->thread.fcr = IA32_FCR_DEFAULT;
	current->thread.fir = 0;
	current->thread.fdr = 0;

	/*
	 * Setup GDTD.  Note: GDTD is the descrambled version of the pseudo-descriptor
	 * format defined by Figure 3-11 "Pseudo-Descriptor Format" in the IA-32
	 * architecture manual. Also note that the only fields that are not ignored are
	 * `base', `limit', 'G', `P' (must be 1) and `S' (must be 0).
	 */
	regs->r31 = IA32_SEG_UNSCRAMBLE(IA32_SEG_DESCRIPTOR(IA32_GDT_OFFSET, IA32_PAGE_SIZE - 1,
							    0, 0, 0, 1, 0, 0, 0));
	/* Setup the segment selectors */
	regs->r16 = (__USER_DS << 16) | __USER_DS; /* ES == DS, GS, FS are zero */
	regs->r17 = (__USER_DS << 16) | __USER_CS; /* SS, CS; ia32_load_state() sets TSS and LDT */

	ia32_load_segment_descriptors(current);
	ia32_load_state(current);
}
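
As a small worked example of the LDT sizing above, the sketch below computes the vma length PAGE_ALIGN(IA32_LDT_ENTRIES * IA32_LDT_ENTRY_SIZE) in userspace; the two constants are assumed to match the kernel's ia32 definitions (8192 descriptors of 8 bytes each) and PAGE_ALIGN is a local stand-in.

#include <stdio.h>

#define PAGE_SIZE 4096UL
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

#define IA32_LDT_ENTRIES    8192UL	/* assumed to match the kernel header */
#define IA32_LDT_ENTRY_SIZE 8UL

int main(void)
{
	unsigned long len = PAGE_ALIGN(IA32_LDT_ENTRIES * IA32_LDT_ENTRY_SIZE);

	printf("LDT vma length = %lu bytes (%lu pages)\n",
	       len, len / PAGE_SIZE);	/* 65536 bytes = 16 pages */
	return 0;
}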