Example 1
static void *
check_malloc (size_t size)
{
  /* Debugging replacement for malloc(): allocates the block with the
     real malloc and records its requested size in pointer_table so
     check_free can later validate the pointer. */
  size_t rounded_up_size;
  void *result = 0;

  /* Disable the hooks so the real malloc below doesn't recurse back
     into this checker. */
  __free_hook = 0;
  __malloc_hook = 0;
  if (size != 0)
    {
#ifdef UNMAPPED_FREE
      /* Round up to an even number of pages so check_free can
	 mprotect() the whole block. */
      rounded_up_size = ROUND_UP_TO_PAGE (size);
#else
      rounded_up_size = size;
#endif
      result = malloc (rounded_up_size);
      if (result)
	{
	  if (!pointer_table)
	    pointer_table = make_hash_table (FREE_QUEUE_LIMIT * 2);
	  /* Record the requested (unrounded) size for this pointer so
	     check_free can validate it. */
	  puthash (result, (void *)size, pointer_table);
	}
    }
  /* Re-arm the hooks on EVERY path.  The old code skipped this for
     size == 0, permanently disabling all checking after the first
     malloc (0); it also recorded NULL results in the table. */
  __free_hook = check_free;
  __malloc_hook = check_malloc;
  return result;
}
Example 2
LM_STATUS
MM_AllocateSharedMemory(PLM_DEVICE_BLOCK pDevice, LM_UINT32 BlockSize,
	PLM_VOID *pMemoryBlockVirt, PLM_PHYSICAL_ADDRESS pMemoryBlockPhy,
	LM_BOOL cached /* we ignore this */)
{
	/* Allocate a zeroed, physically contiguous, page-aligned block
	 * for the Broadcom driver and return both its virtual address
	 * and its physical address split into Low/High 32-bit halves.
	 * Returns LM_STATUS_FAILURE if the area cannot be created. */
	struct be_b57_dev *dev = (struct be_b57_dev *)pDevice;
	void *pvirt = NULL;
	area_id area_desc;
	physical_entry entry;

	area_desc = create_area("broadcom_shared_mem",
		&pvirt, B_ANY_KERNEL_ADDRESS, ROUND_UP_TO_PAGE(BlockSize),
		B_CONTIGUOUS, 0);

	if (area_desc < B_OK)
		return LM_STATUS_FAILURE;

	/* Record the area for later cleanup only once we know it was
	 * actually created; the old code stored the (possibly failed)
	 * id and bumped the counter before checking, leaking a slot
	 * and leaving an invalid id in the list on failure. */
	dev->lockmem_list[dev->lockmem_list_num++] = area_desc;

	memset(pvirt, 0, BlockSize);
	*pMemoryBlockVirt = (PLM_VOID) pvirt;

	get_memory_map(pvirt, BlockSize, &entry, 1);
	pMemoryBlockPhy->Low = (uint32)entry.address;
	pMemoryBlockPhy->High = (uint32)(entry.address >> 32);
		/* We only support 32 bit */

	return LM_STATUS_SUCCESS;
}
Example 3
void Mem_InitPhysicalBitmap() {
    /* Walk the multiboot memory map, mark every usable (type 1) region
       in the physical-page bitmap, record the highest page address seen,
       then re-acquire the pages holding the kernel image so they are
       never handed out.  Finally set up the kernel address-space range. */
    struct MB_mmap_t *l_Mmap, *l_MmapEnd;
    l_Mmap = MB_MemMap;
    l_MmapEnd = (struct MB_mmap_t *) (((uint8_t *) MB_MemMap) + MB_MemMap_Length);
    uint64_t l_MaxPageAddr = 0;
    while (l_Mmap < l_MmapEnd) {
        /* Type 1 == available RAM in the multiboot memory map. */
        if (l_Mmap->m_Type == 1) {
            uint64_t t_BaseAddr = l_Mmap->m_BlockBaseAddr;
            uint64_t t_Length = l_Mmap->m_BlockLength;
            /* NOTE(review): the comparison uses the raw end address while
               the stored maximum is rounded down, and the "+ 1" before
               rounding looks suspicious — confirm intended semantics. */
            if (t_BaseAddr + t_Length > l_MaxPageAddr)
                l_MaxPageAddr = ROUND_DOWN_TO_PAGE((t_BaseAddr + t_Length + 1));
            __MarkOnBitmap(t_BaseAddr, t_Length);
        }
        /* Multiboot mmap entries are m_Size bytes long, not counting the
           4-byte size field itself. */
        l_Mmap = (struct MB_mmap_t *) (((uint8_t *) l_Mmap) + l_Mmap->m_Size + 4);
    }
    global_Memory.m_LastPhysicalPage = l_MaxPageAddr;
    /* One bitmap word covers 32 pages; the "+ 1" rounds the word count up. */
    MemBmp_Initialise(&global_Memory.m_PhysicalBitmap, PhysMem_Bitmap,
            (SHIFT_PAGE(l_MaxPageAddr) / 32) + 1);
    // Exclude kernel code from bitmap
    /* KERNEL_START/KERNEL_END are presumably linker symbols; their
       difference is the size of the kernel image — TODO confirm. */
    uint64_t t_Page = PHYSICAL_KERNEL_ADDRESS,
            t_LastPage = PHYSICAL_KERNEL_ADDRESS +
            ((uintptr_t) & KERNEL_END)-((uintptr_t) & KERNEL_START);
    t_LastPage = ROUND_DOWN_TO_PAGE(t_LastPage);
    while (t_Page <= t_LastPage) {
        MemBmp_AcquirePage(&global_Memory.m_PhysicalBitmap, (void *) t_Page);
        t_Page += PAGE_SIZE;
    }
    /* Kernel-space region begins just past the kernel image; an m_End of
       0 presumably means "no upper bound" — TODO confirm. */
    global_Memory.m_KernelSpace.m_Begin = (void *) ROUND_UP_TO_PAGE((uintptr_t) & KERNEL_END);
    global_Memory.m_KernelSpace.m_End = (void *) ((uintptr_t) 0);
}
Example 4
LM_STATUS
MM_MapMemBase(PLM_DEVICE_BLOCK pDevice)
{
	/* Map the device's first PCI base register (BAR 0) into kernel
	 * address space and record the virtual address in
	 * pDevice->pMappedMemBase.  Returns LM_STATUS_FAILURE when the
	 * mapping could not be established. */
	struct be_b57_dev *pUmDevice = (struct be_b57_dev *)(pDevice);
	size_t size = pUmDevice->pci_data.u.h0.base_register_sizes[0];

	/* map_physical_memory() requires a whole number of pages. */
	size = ROUND_UP_TO_PAGE(size);
	pUmDevice->mem_base = map_physical_memory("broadcom_regs",
		pUmDevice->pci_data.u.h0.base_registers[0], size,
		B_ANY_KERNEL_BLOCK_ADDRESS, 0,
		(void **)(&pDevice->pMappedMemBase));

	/* The old code ignored the result and always reported success;
	 * a negative area id means the registers are unusable. */
	if (pUmDevice->mem_base < B_OK)
		return LM_STATUS_FAILURE;

	return LM_STATUS_SUCCESS;
}
Example 5
static void __MarkOnBitmap(uint64_t BaseAddr, uint64_t Length) {
    /* Mark the pages fully contained in [BaseAddr, BaseAddr + Length)
       as present in PhysMem_Bitmap (one bit per page, 32 bits per
       word).  Partial pages at either end are excluded: the start is
       rounded up and the end rounded down to a page boundary. */
    /* Compute the (exclusive) end page index from the original BYTE
       range before BaseAddr is converted to a page index.  The old
       code added the byte length to the already-converted page index
       (BaseAddr was overwritten one line earlier), producing a wildly
       wrong end bound. */
    uint64_t t_EndPage = SHIFT_PAGE(ROUND_DOWN_TO_PAGE(BaseAddr + Length));
    uint64_t t_Page = SHIFT_PAGE(ROUND_UP_TO_PAGE(BaseAddr));
    uint8_t t_Bit = t_Page % 32;
    uint32_t t_Word = t_Page / 32;
    while (t_Page < t_EndPage) {
        /* 1u avoids undefined behavior when t_Bit == 31 (left-shifting
           a signed 1 into the sign bit). */
        PhysMem_Bitmap[t_Word] |= (1u << t_Bit);
        if (++t_Bit == 32) {
            t_Bit = 0;
            ++t_Word;
        }
        ++t_Page;
    }
}
Example 6
int file_mmap_configured(struct file * filp, struct vm_area_struct * vma){

   /* mmap handler: allow a mapping only when the session is in the
      CONFIGURED or MAPPED state, bound the requested size, and delegate
      page setup to assemble_vma().  Returns 0 on success or a negative
      errno (-ERESTARTSYS, -EIO, -EINVAL, or assemble_vma's error). */
   struct phys_mem_session* session = (struct phys_mem_session*) filp->private_data;
   int ret = 0;

   unsigned long  max_size;


    if (down_interruptible (&session->sem))
            return -ERESTARTSYS;

    if ((GET_STATE(session) != SESSION_STATE_CONFIGURED)
        && (GET_STATE(session) != SESSION_STATE_MAPPED) ) {
      ret = -EIO;
      /* "\n" added: printk messages without a trailing newline are
         merged with the next message in the kernel log. */
      printk(KERN_NOTICE "The session cannot be mmaped in state %i\n", GET_STATE(session));
      goto err;
    }

    /* NOTE(review): ROUND_UP_TO_PAGE() here apparently yields a page
       COUNT, since it is shifted by PAGE_SHIFT to get bytes — confirm
       against the macro's definition. */
    max_size = ROUND_UP_TO_PAGE(SESSION_FRAME_STATI_SIZE(session->num_frame_stati));
    max_size <<= PAGE_SHIFT;

    if ( vma->vm_end - vma->vm_start > max_size){
      ret = -EINVAL;
      printk(KERN_NOTICE "Mmap too large:  %lx > %lx\n", vma->vm_end - vma->vm_start, max_size );
      goto err;
    }

    ret = assemble_vma(session, vma);
    if (ret)
      goto err;

    vma->vm_ops = &phys_mem_vm_ops;
    /* Fixed-size I/O mapping: never expand, exclude from core dumps. */
    vma->vm_flags |= (VM_DONTEXPAND | VM_DONTDUMP | VM_IO);
    vma->vm_private_data = session;

   up(&session->sem);
   phys_mem_vma_open(vma);
   return ret;
err:
  up(&session->sem);
  return ret;

}
Example 7
static void
check_free (void *ptr)
{
  /* Debugging replacement for free(): verifies that PTR came from
     check_malloc and was not already freed, then queues it on a small
     ring (free_queue) instead of freeing it immediately, so stale
     pointers into the block fault (UNMAPPED_FREE) or read 0xdeadbeef.
     The oldest queued block is really free()d to make room. */
  /* Disable the hooks so the real malloc/free below don't recurse
     back into these checkers. */
  __free_hook = 0;
  __malloc_hook = 0;
  if (!pointer_table)
    pointer_table = make_hash_table (max (100, FREE_QUEUE_LIMIT * 2));
  if (ptr != 0)
    {
      long size;
#ifdef UNMAPPED_FREE
      unsigned long rounded_up_size;
#endif

      /* Look up the size check_malloc recorded for PTR. */
      EMACS_INT present = (EMACS_INT) gethash (ptr, pointer_table,
					       (const void **) &size);

      if (!present)
	{
	/* This can only happen if you try to free something that didn't
	   come from malloc */
#if !defined(__linux__)
	  /* I originally wrote:  "There's really no need to drop core."
	     I have seen the error of my ways. -slb */
	  if (strict_free_check)
	    ABORT ();
#endif
	  printf("Freeing unmalloc'ed memory at %p\n", ptr);
	  __free_hook = check_free;
	  __malloc_hook = check_malloc;
	  goto end;
	}

      if (size < 0)
	{
	  /* This happens when you free twice */
#if !defined(__linux__)
	  /* See above comment. */
	  if (strict_free_check)
	    ABORT ();
#endif
	  printf("Freeing %p twice\n", ptr);
	  __free_hook = check_free;
	  __malloc_hook = check_malloc;
	  goto end;
	}

      /* Store the negated size to mark PTR as freed; the size < 0
	 test above is how a second free of PTR is detected. */
      puthash (ptr, (void *)-size, pointer_table);
#ifdef UNMAPPED_FREE
      /* Round up size to an even number of pages. */
      rounded_up_size = ROUND_UP_TO_PAGE (size);
      /* Protect the pages freed from all access */
      if (strict_free_check)
	mprotect (ptr, rounded_up_size, PROT_NONE);
#else
      /* Set every word in the block to 0xdeadbeef */
      if (strict_free_check)
	{
	  unsigned long long_length = (size + (sizeof (long) - 1))
	    / sizeof (long);
	  unsigned long i;

	  for (i = 0; i < long_length; i++)
	    ((unsigned long *) ptr)[i] = 0xdeadbeef;
	}
#endif
      /* Park the block on the ring; it will really be freed
	 FREE_QUEUE_LIMIT frees from now. */
      free_queue[current_free].address = ptr;
      free_queue[current_free].length = size;

      current_free++;
      if (current_free >= FREE_QUEUE_LIMIT)
	current_free = 0;
      /* Really free this if there's something there */
      {
	void *old = free_queue[current_free].address;

	if (old)
	  {
#ifdef UNMAPPED_FREE
	    unsigned long old_len = free_queue[current_free].length;

	    /* Make the pages accessible again before handing them
	       back to the real free(). */
	    mprotect (old, old_len,  PROT_READ | PROT_WRITE | PROT_EXEC);
#endif
	    free (old);
	    remhash (old, pointer_table);
	  }
      }
    }
  /* Re-arm the hooks for the next malloc/free. */
  __free_hook = check_free;
  __malloc_hook = check_malloc;

 end:
  return;
}