Example #1
uintptr_t mm_pgalloc(unsigned int npages)
{
  /* With CONFIG_GRAN_SINGLE there is a single, pre-configured granule
   * allocator and gran_alloc() takes no handle; otherwise the page
   * allocator is selected explicitly through the g_pgalloc handle.
   */

#ifdef CONFIG_GRAN_SINGLE
  return (uintptr_t)gran_alloc((size_t)1 << MM_PGSHIFT);
#else
  return (uintptr_t)gran_alloc(g_pgalloc, (size_t)1 << MM_PGSHIFT);
#endif
}
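For reference, a minimal sketch of the matching release path, assuming the same CONFIG_GRAN_SINGLE / g_pgalloc conventions as the allocator above; the function name mm_pgfree and the byte-count expression are illustrative, not taken from this page.

/* Hypothetical counterpart: return previously allocated pages to the
 * granule allocator, using the same MM_PGSHIFT page size and the same
 * g_pgalloc handle as mm_pgalloc() above.
 */

void mm_pgfree(uintptr_t paddr, unsigned int npages)
{
#ifdef CONFIG_GRAN_SINGLE
  gran_free((FAR void *)paddr, (size_t)npages << MM_PGSHIFT);
#else
  gran_free(g_pgalloc, (FAR void *)paddr, (size_t)npages << MM_PGSHIFT);
#endif
}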
Example #2
void *
fat_dma_alloc(size_t size)
{
	void *rv = NULL;
	perf_count(g_dma_perf);
	rv = gran_alloc(dma_allocator, size);

	if (rv != NULL) {
		dma_heap_inuse += size;

		if (dma_heap_inuse > dma_heap_peak_use) {
			dma_heap_peak_use = dma_heap_inuse;
		}
	}

	return rv;
}
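A sketch of the corresponding release routine, assuming it simply mirrors the accounting in fat_dma_alloc() above; the name fat_dma_free and the exact bookkeeping are assumptions, not shown on this page.

/* Hypothetical release path: return the block to the dma_allocator granule
 * pool and reverse the usage accounting.  The caller must pass the same
 * size that was originally allocated, since the granule allocator does not
 * remember allocation sizes.
 */

void
fat_dma_free(void *memory, size_t size)
{
	gran_free(dma_allocator, memory, size);
	dma_heap_inuse -= size;
}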
Example #3
void *
fat_dma_alloc(size_t size)
{
	perf_count(g_dma_perf);
	return gran_alloc(dma_allocator, size);
}
Example #4
FAR void *shmat(int shmid, FAR const void *shmaddr, int shmflg)
{
  FAR struct shm_region_s *region;
  FAR struct task_group_s *group;
  FAR struct tcb_s *tcb;
  uintptr_t vaddr;
  unsigned int npages;
  int ret;

  /* Get the region associated with the shmid */

  DEBUGASSERT(shmid >= 0 && shmid < CONFIG_ARCH_SHM_MAXREGIONS);
  region = &g_shminfo.si_region[shmid];
  DEBUGASSERT((region->sr_flags & SRFLAG_INUSE) != 0);

  /* Get the TCB and group containing our virtual memory allocator */

  tcb = sched_self();
  DEBUGASSERT(tcb && tcb->group);
  group = tcb->group;
  DEBUGASSERT(group->tg_shm.gs_handle != NULL &&
              group->tg_shm.gs_vaddr[shmid] == 0);

  /* Get exclusive access to the region data structure */

  ret = sem_wait(&region->sr_sem);
  if (ret < 0)
    {
      shmdbg("sem_wait failed: %d\n", ret);
      goto errout;
    }

  /* Set aside a virtual address space to span this physical region */

  vaddr = (uintptr_t)gran_alloc(group->tg_shm.gs_handle,
                                region->sr_ds.shm_segsz);
  if (vaddr == 0)
    {
      shmdbg("gran_alloc() failed\n");
      ret = -ENOMEM;
      goto errout_with_semaphore;
    }

  /* Convert the region size to pages */

  npages = MM_PGALIGNUP(region->sr_ds.shm_segsz);

  /* Attach, i.e., map, the shared memory region to the user virtual address. */

  ret = up_shmat(region->sr_pages, npages, vaddr);
  if (ret < 0)
    {
      shmdbg("up_shmat() failed\n");
      goto errout_with_vaddr;
    }

  /* Save the virtual address of the region.  We will need that in shmdt()
   * to do the reverse lookup:  Given the virtual address of the region to
   * detach, we need to get the region table index (a sketch of that lookup
   * follows this example).
   */

  group->tg_shm.gs_vaddr[shmid] = vaddr;

  /* Increment the count of processes attached to this region */

  region->sr_ds.shm_nattch++;

  /* Save the process ID of the last operation */

  region->sr_ds.shm_lpid = tcb->pid;

  /* Save the time of the last shmat() */

  region->sr_ds.shm_atime = time(NULL);

  /* Release our lock on the entry */

  sem_post(&region->sr_sem);
  return (FAR void *)vaddr;

errout_with_vaddr:
  gran_free(group->tg_shm.gs_handle, (FAR void *)vaddr,
            region->sr_ds.shm_segsz);
errout_with_semaphore:
  sem_post(&region->sr_sem);
  set_errno(-ret);
errout:
  return (FAR void *)ERROR;
}
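A sketch of the reverse lookup mentioned in the comment above, roughly as shmdt() would need it; only the structures and the CONFIG_ARCH_SHM_MAXREGIONS limit used in the example are assumed, and the helper name is hypothetical.

/* Hypothetical helper for shmdt(): find the region table index whose saved
 * virtual address matches the address being detached.  Returns the shmid
 * on success or -EINVAL if the address was never attached by this group.
 */

static int shm_lookup_shmid(FAR struct task_group_s *group,
                            FAR const void *shmaddr)
{
  int shmid;

  for (shmid = 0; shmid < CONFIG_ARCH_SHM_MAXREGIONS; shmid++)
    {
      if (group->tg_shm.gs_vaddr[shmid] == (uintptr_t)shmaddr)
        {
          return shmid;
        }
    }

  return -EINVAL;
}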
Example #5
int up_addrenv_create(size_t envsize, FAR task_addrenv_t *addrenv)
{
  FAR struct z180_cbr_s *cbr;
  irqstate_t flags;
  uintptr_t alloc;
  unsigned int npages;
  int ret;

  /* Convert the size from bytes to numbers of pages */

  npages = PHYS_ALIGNUP(envsize);
  if (npages < 1)
    {
      /* No address environment... but I suppose that is not an error */

      sdbg("ERROR: npages is zero\n");
      return OK;
    }
  
  /* Allocate a structure in the common .bss to hold information about the
   * task's address environment.  NOTE that this is not a part of the TCB,
   * but rather a break-away structure that can be shared by the task as
   * well as other threads.  That is necessary because the life of the
   * address environment might be longer than the life of the task.
   */

  flags = irqsave();
  cbr = z180_mmu_alloccbr();
  if (!cbr)
    {
      sdbg("ERROR: No free CBR structures\n");
      ret = -ENOMEM;
      goto errout_with_irq;
    }

  /* Now allocate the physical memory to back up the address environment */

#ifdef CONFIG_GRAN_SINGLE
  alloc = (uintptr_t)gran_alloc(npages);
#else
  alloc = (uintptr_t)gran_alloc(g_physhandle, npages);
#endif
  if (!alloc)
    {
      sdbg("ERROR: Failed to allocate %d pages\n", npages);
      ret = -ENOMEM;
      goto errout_with_cbr;
    }

  /* Save the information in the CBR structure.  Note that alloc is in
   * 4KB pages, already in the right form for the CBR.
   */

  DEBUGASSERT(alloc <= 0xff);

  cbr->cbr     = (uint8_t)alloc;
  cbr->pages   = (uint8_t)npages;
  *addrenv     = (task_addrenv_t)cbr;

  irqrestore(flags);
  return OK;

errout_with_cbr:
  z180_mmu_freecbr(cbr);
  
errout_with_irq:
  irqrestore(flags);
  return ret;
}
Example #6
void *fat_dma_alloc(size_t size)
{
  return gran_alloc(dma_allocator, size);
}
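Finally, a sketch of where a handle such as dma_allocator could come from, using the NuttX gran_initialize() API; the buffer, its size, and the granule/alignment choices below are illustrative assumptions, not taken from these examples.

/* Hypothetical setup: create a granule allocator over a dedicated,
 * DMA-capable buffer.  gran_initialize() takes the start and size of the
 * managed region, the log2 of the granule size, and the log2 of the
 * allocation alignment; 64-byte granules and 64-byte alignment (log2 = 6)
 * are used here purely as an example.
 */

static uint8_t g_dma_heap[8192] __attribute__((aligned(64)));
static GRAN_HANDLE dma_allocator;

int dma_alloc_init(void)
{
  dma_allocator = gran_initialize(g_dma_heap, sizeof(g_dma_heap), 6, 6);
  return (dma_allocator != NULL) ? 0 : -ENOMEM;
}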