Example #1
static grub_err_t
grub_linuxefi_unload (void)
{
    grub_dl_unref (my_mod);
    loaded = 0;
    if (initrd_mem)
        grub_efi_free_pages((grub_efi_physical_address_t)initrd_mem, BYTES_TO_PAGES(params->ramdisk_size));
    if (linux_cmdline)
        grub_efi_free_pages((grub_efi_physical_address_t)linux_cmdline, BYTES_TO_PAGES(params->cmdline_size + 1));
    if (kernel_mem)
        grub_efi_free_pages((grub_efi_physical_address_t)kernel_mem, BYTES_TO_PAGES(kernel_size));
    if (params)
        grub_efi_free_pages((grub_efi_physical_address_t)params, BYTES_TO_PAGES(16384));
    return GRUB_ERR_NONE;
}
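Every example on this page revolves around BYTES_TO_PAGES, which converts a byte count into the number of pages needed to hold it (a ceiling division). Each codebase defines the macro itself; as a sketch only, the two common shapes look roughly like this, assuming a 4 KiB page (the exact spelling varies by tree, so treat these as assumptions rather than verbatim quotes):

/* Sketch only: typical BYTES_TO_PAGES definitions, assuming PAGE_SHIFT == 12. */
#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* Round up first, then shift (GRUB-style). */
#define BYTES_TO_PAGES(bytes)  (((bytes) + PAGE_SIZE - 1) >> PAGE_SHIFT)

/* Truncating shift, plus one page if there is a remainder (DDK-style). */
#define BYTES_TO_PAGES_ALT(Size) \
    (((Size) >> PAGE_SHIFT) + (((Size) & (PAGE_SIZE - 1)) != 0))

Both forms compute ceil(bytes / PAGE_SIZE); for instance BYTES_TO_PAGES(16384) is 4, which is why Example #1 frees four pages for the 16 KiB boot-params block.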
Example #2
NTSTATUS
NTAPI
MiCreatePebOrTeb(IN PEPROCESS Process,
                 IN ULONG Size,
                 OUT PULONG_PTR Base)
{
    PETHREAD Thread = PsGetCurrentThread();
    PMMVAD_LONG Vad;
    NTSTATUS Status;
    ULONG RandomCoeff;
    ULONG_PTR StartAddress, EndAddress;
    LARGE_INTEGER CurrentTime;
    TABLE_SEARCH_RESULT Result = TableFoundNode;
    PMMADDRESS_NODE Parent;

    /* Allocate a VAD */
    Vad = ExAllocatePoolWithTag(NonPagedPool, sizeof(MMVAD_LONG), 'ldaV');
    if (!Vad) return STATUS_NO_MEMORY;

    /* Setup the primary flags with the size, and make it committed, private, RW */
    Vad->u.LongFlags = 0;
    Vad->u.VadFlags.CommitCharge = BYTES_TO_PAGES(Size);
    Vad->u.VadFlags.MemCommit = TRUE;
    Vad->u.VadFlags.PrivateMemory = TRUE;
    Vad->u.VadFlags.Protection = MM_READWRITE;
    Vad->u.VadFlags.NoChange = TRUE;

    /* Setup the secondary flags to make it a secured, writable, long VAD */
    Vad->u2.LongFlags2 = 0;
    Vad->u2.VadFlags2.OneSecured = TRUE;
    Vad->u2.VadFlags2.LongVad = TRUE;
    Vad->u2.VadFlags2.ReadOnly = FALSE;

    /* Lock the process address space */
    KeAcquireGuardedMutex(&Process->AddressCreationLock);

    /* Check if this is a PEB creation */
    if (Size == sizeof(PEB))
    {
        /* Start at the highest valid address */
        StartAddress = (ULONG_PTR)MM_HIGHEST_VAD_ADDRESS + 1;

        /* Select the random coefficient */
        KeQueryTickCount(&CurrentTime);
        CurrentTime.LowPart &= ((64 * _1KB) >> PAGE_SHIFT) - 1;
        if (CurrentTime.LowPart <= 1) CurrentTime.LowPart = 2;
        RandomCoeff = CurrentTime.LowPart << PAGE_SHIFT;

        /* Select the highest valid address minus the random coefficient */
        StartAddress -= RandomCoeff;
        EndAddress = StartAddress + ROUND_TO_PAGES(Size) - 1;

        /* Try to find something below the random upper margin */
        Result = MiFindEmptyAddressRangeDownTree(ROUND_TO_PAGES(Size),
                                                 EndAddress,
                                                 PAGE_SIZE,
                                                 &Process->VadRoot,
                                                 Base,
                                                 &Parent);
    }
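The excerpt above ends inside the PEB branch, but the randomization arithmetic is worth unpacking: the tick count is masked down to a small page count, clamped away from 0 and 1, and shifted into a byte offset. A minimal sketch of the same math, assuming PAGE_SHIFT == 12 and _1KB == 1024 (both assumptions, matching common x86 definitions):

/* Sketch: the PEB placement randomization from MiCreatePebOrTeb. */
#include <stdio.h>

#define PAGE_SHIFT 12
#define _1KB       1024u

int main(void)
{
    unsigned tick  = 0x12345678u;  /* stand-in for KeQueryTickCount() */
    unsigned coeff = tick & (((64 * _1KB) >> PAGE_SHIFT) - 1); /* 0..15 pages */
    if (coeff <= 1) coeff = 2;     /* never 0 or 1 pages */
    coeff <<= PAGE_SHIFT;          /* 8 KiB .. 60 KiB */
    printf("PEB lands %u bytes below the highest VAD address\n", coeff);
    return 0;
}

So the PEB is placed at page granularity somewhere in a roughly 60 KiB window below MM_HIGHEST_VAD_ADDRESS.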
Example #3
static bool setup_pml4(uintptr_t *pml4)
{
	PPHYSICAL_MEMORY_RANGE pm_ranges = MmGetPhysicalMemoryRanges();
	bool ret = false;

	for (int run = 0;; ++run) {
		uintptr_t base_addr = pm_ranges[run].BaseAddress.QuadPart;
		uintptr_t bytes = pm_ranges[run].NumberOfBytes.QuadPart;
		if (!base_addr || !bytes)
			break;

		uintptr_t nr_pages = BYTES_TO_PAGES(bytes);
		for (uintptr_t page = 0; page < nr_pages; ++page) {
			uintptr_t page_addr = base_addr + page * PAGE_SIZE;
			uintptr_t *entry = ept_alloc_page(NULL, pml4, EPT_ACCESS_ALL, page_addr);
			if (!entry)
				goto out;
		}
	}

	/* Allocate APIC page  */
	ret = !!ept_alloc_page(NULL, pml4, EPT_ACCESS_ALL, __readmsr(MSR_IA32_APICBASE) & MSR_IA32_APICBASE_BASE);

out:
	ExFreePool(pm_ranges);
	return ret;
}
Example #4
NTSTATUS DriverEntry(PDRIVER_OBJECT driverObject, PUNICODE_STRING registryPath)
{
	LdrDataTableEntry *entry = driverObject->DriverSection;
	PsLoadedModuleList = entry->in_load_links.Flink;
	driverObject->DriverUnload = DriverUnload;

	VCPU_DEBUG("We're mapped at %p (size: %d bytes (%d KB), on %d pages)\n",
		   entry->base, entry->size, entry->size / 1024, entry->size / PAGE_SIZE);
	LdrDataTableEntry *kentry = container_of(PsLoadedModuleList->Flink, LdrDataTableEntry, in_load_links);
	g_kernel_base = kentry->base;

	VCPU_DEBUG("Kernel: %p -> %p (size: 0x%X pages: %d) path: %wS\n",
		   kentry->base, (uintptr_t)kentry->base + kentry->size,
		   kentry->size, BYTES_TO_PAGES(kentry->size),
		   kentry->path.Buffer);
	ExInitializeDriverRuntime(DrvRtPoolNxOptIn);

	NTSTATUS status = ksm_init();
	if (NT_SUCCESS(status))
		status = register_power_callback(&g_dev_ext);

	if (NT_SUCCESS(status))
		status = PsCreateSystemThread(&hThread, STANDARD_RIGHTS_ALL, NULL, NULL, &cid, (PKSTART_ROUTINE)sys_thread, NULL);

	VCPU_DEBUG("ret: 0x%08X\n", status);
	return status;
}
Example #5
PVOID
NTAPI
HalpAcpiCopyBiosTable(IN PLOADER_PARAMETER_BLOCK LoaderBlock,
                      IN PDESCRIPTION_HEADER TableHeader)
{
    ULONG Size;
    PFN_COUNT PageCount;
    PHYSICAL_ADDRESS PhysAddress;
    PACPI_CACHED_TABLE CachedTable;
    PDESCRIPTION_HEADER CopiedTable;

    /* Size we'll need for the cached table */
    Size = TableHeader->Length + FIELD_OFFSET(ACPI_CACHED_TABLE, Header);
    if (LoaderBlock)
    {
        /* Phase 0: Convert to pages and use the HAL heap */
        PageCount = BYTES_TO_PAGES(Size);
        PhysAddress.QuadPart = HalpAllocPhysicalMemory(LoaderBlock,
                                                       0x1000000,
                                                       PageCount,
                                                       FALSE);
        if (PhysAddress.QuadPart)
        {
            /* Map it */
            CachedTable = HalpMapPhysicalMemory64(PhysAddress, PageCount);
        }
        else
        {
            /* No memory, so nothing to map */
            CachedTable = NULL;
        }
    }
    else
    {
        /* Use Mm pool */
        CachedTable = ExAllocatePoolWithTag(NonPagedPool, Size, ' laH');
    }

    /* Do we have the cached table? */
    if (CachedTable)
    {
        /* Copy the data */
        CopiedTable = &CachedTable->Header;
        RtlCopyMemory(CopiedTable, TableHeader, TableHeader->Length);
    }
    else
    {
        /* Nothing to return */
        CopiedTable = NULL;
    }

    /* Return the table */
    return CopiedTable;
}
Example #6
size_t VIOSerialSendBuffers(IN PVIOSERIAL_PORT Port,
                            IN PVOID Buffer,
                            IN size_t Length)
{
    struct virtqueue *vq = GetOutQueue(Port);
    struct VirtIOBufferDescriptor sg[QUEUE_DESCRIPTORS];
    PVOID buffer = Buffer;
    size_t length = Length;
    int out = 0;
    int ret;

    TraceEvents(TRACE_LEVEL_VERBOSE, DBG_WRITE,
                "--> %s Buffer: %p Length: %d\n", __FUNCTION__, Buffer, Length);

    if (BYTES_TO_PAGES(Length) > QUEUE_DESCRIPTORS)
    {
        return 0;
    }

    while (length > 0)
    {
        sg[out].physAddr = MmGetPhysicalAddress(buffer);
        sg[out].length = min(length, PAGE_SIZE);

        buffer = (PVOID)((LONG_PTR)buffer + sg[out].length);
        length -= sg[out].length;
        out += 1;
    }

    WdfSpinLockAcquire(Port->OutVqLock);

    ret = virtqueue_add_buf(vq, sg, out, 0, Buffer, NULL, 0);
    virtqueue_kick(vq);

    if (ret >= 0)
    {
        Port->OutVqFull = (ret == 0);
    }
    else
    {
        Length = 0;
        TraceEvents(TRACE_LEVEL_ERROR, DBG_WRITE,
                    "Error adding buffer to queue (ret = %d)\n", ret);
    }

    WdfSpinLockRelease(Port->OutVqLock);

    TraceEvents(TRACE_LEVEL_VERBOSE, DBG_WRITE, "<-- %s\n", __FUNCTION__);

    return Length;
}
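The BYTES_TO_PAGES guard at the top is exact here: the while loop emits one scatter-gather entry per PAGE_SIZE-sized chunk, so a Length-byte buffer produces exactly BYTES_TO_PAGES(Length) entries, which must fit in the sg[QUEUE_DESCRIPTORS] array. A standalone sketch of that invariant (count_chunks is a hypothetical helper, not part of the driver):

/* Sketch: the loop in VIOSerialSendBuffers produces ceil(length / page_size)
 * descriptors, i.e. BYTES_TO_PAGES(length) of them. */
static size_t count_chunks(size_t length, size_t page_size)
{
    size_t out = 0;
    while (length > 0) {
        size_t chunk = length < page_size ? length : page_size;
        length -= chunk;
        out++;
    }
    return out;
}

Note this counts chunks, not pages touched: if Buffer is not page-aligned, a PAGE_SIZE chunk can straddle a physical page boundary, which the driver appears to rely on callers to avoid.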
Example #7
/* Wrap the static make_area */
Area* s48_make_area(s48_address start, s48_address end,
		    s48_address frontier, 
		    unsigned char generation_index,
		    area_type_size_t area_type_size) {
  Area* area = make_area(start, end, frontier, generation_index, area_type_size);
  /* The area is put into all memory-map cells that are covered by
     it. */
  int size = BYTES_TO_PAGES(end-start);
  int i;
  for (i = 0; i < size; i++)
    /* Safe because I is less than SIZE, which cannot cause an
       overflow here. */
    s48_memory_map_setB(ADD_PAGES_I_KNOW_THIS_CAN_OVERFLOW(start, i), area);
  return area;
}
Example #8
static bool iwm_tx_credit_ok(struct iwm_priv *iwm, int id, int nb)
{
	u32 npages = BYTES_TO_PAGES(nb);

	if (npages <= iwm_tx_credit_get(&iwm->tx_credit, id))
		return 1;

	set_bit(id, &iwm->tx_credit.full_pools_map);

	IWM_DBG_TX(iwm, DBG, "LINK: stop txq[%d], available credit: %d\n",
		   pool_id_to_queue(id),
		   iwm_tx_credit_get(&iwm->tx_credit, id));

	return 0;
}
Example #9
int
grub_efi_get_memory_map (grub_efi_uintn_t *map_key,
			 grub_efi_uintn_t *descriptor_size,
			 grub_efi_uint32_t *descriptor_version)
{
  grub_efi_status_t status;
  grub_efi_boot_services_t *b;
  grub_efi_uintn_t key;
  grub_efi_uint32_t version;
  grub_efi_uintn_t tmp_mmap_size;

  /* Allow some parameters to be missing.  */
  if (! map_key)
    map_key = &key;
  if (! descriptor_version)
    descriptor_version = &version;

  while (1)
    {
      b = grub_efi_system_table->boot_services;
      tmp_mmap_size = PAGES_TO_BYTES(mmap_pages);
      status = Call_Service_5 (b->get_memory_map,
			      &tmp_mmap_size, mmap_buf, map_key,
			      descriptor_size, descriptor_version);
      if (status == GRUB_EFI_SUCCESS)
        {
          mmap_size = tmp_mmap_size;
          return 1;
        }
      else if (status != GRUB_EFI_BUFFER_TOO_SMALL)
        return -1;

      /* we need a larger buffer */
      if (mmap_buf)
        grub_efi_free_pages ((grub_addr_t) mmap_buf, mmap_pages);

      /* get 1 more page than we need, just in case */
      mmap_pages = BYTES_TO_PAGES(tmp_mmap_size + 4095) + 1;
      mmap_buf = grub_efi_allocate_pages (0, mmap_pages);
      if (! mmap_buf)
        {
          mmap_pages = 0;
          grub_printf ("cannot allocate memory for memory map");
          return -1;
        }
    }
}
Example #10
int iwm_tx_credit_alloc(struct iwm_priv *iwm, int id, int nb)
{
	u32 npages = BYTES_TO_PAGES(nb);
	int ret = 0;

	spin_lock(&iwm->tx_credit.lock);

	if (!iwm_tx_credit_ok(iwm, id, nb)) {
		IWM_DBG_TX(iwm, DBG, "No credit available for pool[%d]\n", id);
		ret = -ENOSPC;
		goto out;
	}

	iwm_tx_credit_dec(iwm, id, npages);

 out:
	spin_unlock(&iwm->tx_credit.lock);
	return ret;
}
Example #11
/*
 * @implemented
 */
VOID
NTAPI
MmFreeNonCachedMemory(IN PVOID BaseAddress,
                      IN SIZE_T NumberOfBytes)
{
    PMDL Mdl;
    PMMPTE PointerPte;
    PFN_COUNT PageCount;

    //
    // Sanity checks
    //
    ASSERT(NumberOfBytes != 0);
    ASSERT(PAGE_ALIGN(BaseAddress) == BaseAddress);

    //
    // Get the page count
    //
    PageCount = (PFN_COUNT)BYTES_TO_PAGES(NumberOfBytes);

    //
    // Get the first PTE
    //
    PointerPte = MiAddressToPte(BaseAddress);

    //
    // Remember this is where we store the shadow MDL pointer
    //
    Mdl = *(PMDL*)(--PointerPte);

    //
    // Kill the MDL (and underlying pages)
    //
    MmFreePagesFromMdl(Mdl);
    ExFreePoolWithTag(Mdl, TAG_MDL);

    //
    // Now free the system PTEs for the underlying VA
    //
    MiReleaseSystemPtes(PointerPte, PageCount + 1, SystemPteSpace);
}
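The --PointerPte dereference only works because the allocation side (MmAllocateNonCachedMemory, quoted in Example #24 below) reserves one extra system PTE in front of the mapping and stashes the MDL pointer there; the free path steps back one slot to recover it, then releases PageCount + 1 PTEs so the shadow slot is reclaimed too. A toy sketch of the idea, with a plain pointer array standing in for the PTE window:

/* Sketch: stash a descriptor one slot before the region it describes. */
#include <stdio.h>

int main(void)
{
    int   mdl = 42;          /* stand-in for the MDL                      */
    void *slots[4] = { 0 };  /* stand-in for consecutive system PTEs      */
    int   base = 1;          /* mapping starts at slot 1; slot 0 is spare */

    slots[base - 1] = &mdl;                   /* allocation side                  */
    int *recovered = (int *)slots[base - 1];  /* free side: *(PMDL*)(--PointerPte) */
    printf("recovered MDL = %d\n", *recovered);
    return 0;
}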
Example #12
void s48_free_area(Area* area) {
  unsigned long size = BYTES_TO_PAGES(area->end - area->start);
  s48_address start = area->start;
  unsigned long i;
  
  s48_free_pagesB(start, size);

  /* This is not really needed, I think. It's only a waste of time */
  for (i = 0; i < size; i++) {
    /* Safe because I is less than SIZE, which cannot cause an
       overflow here. */
    s48_memory_map_setB(ADD_PAGES_I_KNOW_THIS_CAN_OVERFLOW(start, i), NULL);
  }

#ifndef NDEBUG
  /* Blank it out, to find errors more easily */
  memset(area->start, 0, area->end - area->start);
#endif

  free_area(area);
}
Example #13
/**
 * effects: Build and initialize a General Trap struct (which is also a Trap struct).
 */
NTSTATUS NTAPI TrInitializeGeneralTrap (
    PCPU Cpu,
    ULONG TrappedVmExit,
    UCHAR RipDelta,
    NBP_TRAP_CALLBACK TrapCallback,
    PNBP_TRAP *pInitializedTrap
)
{//Finish
    PNBP_TRAP Trap;
	Print(("HelloWorld:TrInitializeGeneralTrap():TrappedVmExit 0x%x\n", TrappedVmExit));

    if (!Cpu || 
        !TrapCallback || 
        !Hvm->ArchIsTrapValid (TrappedVmExit) ||//<----------------5.1 Finish
        !pInitializedTrap)
    {
        return STATUS_INVALID_PARAMETER;
    }

    Trap = (PNBP_TRAP)MmAllocatePages (BYTES_TO_PAGES (sizeof (NBP_TRAP)), NULL, TRUE);
    if (!Trap) 
    {
        Print(("HelloWorld:TrInitializeGeneralTrap(): Failed to allocate NBP_TRAP structure (%d bytes)\n", sizeof (NBP_TRAP)));
        return STATUS_INSUFFICIENT_RESOURCES;
    }

    RtlZeroMemory (Trap, sizeof (NBP_TRAP));

    Trap->TrapType = TRAP_GENERAL;
    Trap->TrappedVmExit = TrappedVmExit;
    Trap->RipDelta = RipDelta;
    Trap->TrapCallback = TrapCallback;

    *pInitializedTrap = Trap;

    return STATUS_SUCCESS;
}
Example #14
VOID
NTAPI
INIT_FUNCTION
MiInitializeNonPagedPool(VOID)
{
    ULONG i;
    PFN_NUMBER PoolPages;
    PMMFREE_POOL_ENTRY FreeEntry, FirstEntry;
    PMMPTE PointerPte;
    PAGED_CODE();

    //
    // We keep 4 lists of free pages (4 lists help avoid contention)
    //
    for (i = 0; i < MI_MAX_FREE_PAGE_LISTS; i++)
    {
        //
        // Initialize each of them
        //
        InitializeListHead(&MmNonPagedPoolFreeListHead[i]);
    }

    //
    // Calculate how many pages the initial nonpaged pool has
    //
    PoolPages = BYTES_TO_PAGES(MmSizeOfNonPagedPoolInBytes);
    MmNumberOfFreeNonPagedPool = PoolPages;

    //
    // Initialize the first free entry
    //
    FreeEntry = MmNonPagedPoolStart;
    FirstEntry = FreeEntry;
    FreeEntry->Size = PoolPages;
    FreeEntry->Signature = MM_FREE_POOL_SIGNATURE;
    FreeEntry->Owner = FirstEntry;

    //
    // Insert it into the last list
    //
    InsertHeadList(&MmNonPagedPoolFreeListHead[MI_MAX_FREE_PAGE_LISTS - 1],
                   &FreeEntry->List);

    //
    // Now create free entries for every single other page
    //
    while (PoolPages-- > 1)
    {
        //
        // Link them all back to the original entry
        //
        FreeEntry = (PMMFREE_POOL_ENTRY)((ULONG_PTR)FreeEntry + PAGE_SIZE);
        FreeEntry->Owner = FirstEntry;
        FreeEntry->Signature = MM_FREE_POOL_SIGNATURE;
    }

    //
    // Validate and remember first allocated pool page
    //
    PointerPte = MiAddressToPte(MmNonPagedPoolStart);
    ASSERT(PointerPte->u.Hard.Valid == 1);
    MiStartOfInitialPoolFrame = PFN_FROM_PTE(PointerPte);

    //
    // Keep track of where initial nonpaged pool ends
    //
    MmNonPagedPoolEnd0 = (PVOID)((ULONG_PTR)MmNonPagedPoolStart +
                                 MmSizeOfNonPagedPoolInBytes);

    //
    // Validate and remember last allocated pool page
    //
    PointerPte = MiAddressToPte((PVOID)((ULONG_PTR)MmNonPagedPoolEnd0 - 1));
    ASSERT(PointerPte->u.Hard.Valid == 1);
    MiEndOfInitialPoolFrame = PFN_FROM_PTE(PointerPte);

    //
    // Validate the first nonpaged pool expansion page (which is a guard page)
    //
    PointerPte = MiAddressToPte(MmNonPagedPoolExpansionStart);
    ASSERT(PointerPte->u.Hard.Valid == 0);

    //
    // Calculate the size of the expansion region alone
    //
    MiExpansionPoolPagesInitialCharge =
    BYTES_TO_PAGES(MmMaximumNonPagedPoolInBytes - MmSizeOfNonPagedPoolInBytes);

    //
    // Remove 2 pages, since there's a guard page on top and on the bottom
    //
    MiExpansionPoolPagesInitialCharge -= 2;

    //
    // Now initialize the nonpaged pool expansion PTE space. Remember there's a
    // guard page on top so make sure to skip it. The bottom guard page will be
    // guaranteed by the fact our size is off by one.
    //
    MiInitializeSystemPtes(PointerPte + 1,
                           MiExpansionPoolPagesInitialCharge,
                           NonPagedPoolExpansion);
}
Example #15
static int rtl8169_init(rtl8169 *r)
{
    //bigtime_t time;
    int err = -1;
    //addr_t temp;
    //int i;

    hal_mutex_init(&r->lock,DEBUG_MSG_PREFIX);


    SHOW_FLOW(2, "rtl8169_init: r %p\n", r);

    /*
     r->region = vm_map_physical_memory(vm_get_kernel_aspace_id(), "rtl8169_region", (void **)&r->virt_base, REGION_ADDR_ANY_ADDRESS, r->phys_size, LOCK_KERNEL|LOCK_RW, r->phys_base);
    if(r->region < 0) {
        SHOW_ERROR0(1, "rtl8169_init: error creating memory mapped region\n");
        err = -1;
        goto err;
    }*/

    size_t n_pages = BYTES_TO_PAGES(r->phys_size);

    hal_alloc_vaddress( (void **)&r->virt_base, n_pages); // alloc address of a page, but not memory
    hal_pages_control_etc( r->phys_base, (void *)r->virt_base, n_pages, page_map_io, page_rw, 0 );

    SHOW_INFO(2, "rtl8169 mapped at address 0x%lx\n", r->virt_base);

#if 0
    /* create regions for tx and rx descriptors */
    r->rxdesc_region = vm_create_anonymous_region(vm_get_kernel_aspace_id(), "rtl8169_rxdesc", (void **)&r->rxdesc,
                                                  REGION_ADDR_ANY_ADDRESS, NUM_RX_DESCRIPTORS * DESCRIPTOR_LEN, REGION_WIRING_WIRED_CONTIG, LOCK_KERNEL|LOCK_RW);
    r->rxdesc_phys = vtophys(r->rxdesc);
    SHOW_INFO(2, "rtl8169: rx descriptors at %p, phys 0x%x\n", r->rxdesc, r->rxdesc_phys);
    r->txdesc_region = vm_create_anonymous_region(vm_get_kernel_aspace_id(), "rtl8169_txdesc", (void **)&r->txdesc,
                                                  REGION_ADDR_ANY_ADDRESS, NUM_TX_DESCRIPTORS * DESCRIPTOR_LEN, REGION_WIRING_WIRED_CONTIG, LOCK_KERNEL|LOCK_RW);
    r->txdesc_phys = vtophys(r->txdesc);
    SHOW_INFO(2, "rtl8169: tx descriptors at %p, phys 0x%x\n", r->txdesc, r->txdesc_phys);
    r->reg_spinlock = 0;

    /* create a large tx and rx buffer for the descriptors to point to */
    r->rxbuf_region = vm_create_anonymous_region(vm_get_kernel_aspace_id(), "rtl8169_rxbuf", (void **)&r->rxbuf,
                                                 REGION_ADDR_ANY_ADDRESS, NUM_RX_DESCRIPTORS * BUFSIZE_PER_FRAME, REGION_WIRING_WIRED, LOCK_KERNEL|LOCK_RW);
    r->txbuf_region = vm_create_anonymous_region(vm_get_kernel_aspace_id(), "rtl8169_txbuf", (void **)&r->txbuf,
                                                 REGION_ADDR_ANY_ADDRESS, NUM_TX_DESCRIPTORS * BUFSIZE_PER_FRAME, REGION_WIRING_WIRED, LOCK_KERNEL|LOCK_RW);
#endif

    hal_pv_alloc( &r->rxdesc_phys, (void**)&r->rxdesc, NUM_RX_DESCRIPTORS * DESCRIPTOR_LEN );
    hal_pv_alloc( &r->txdesc_phys, (void**)&r->txdesc, NUM_TX_DESCRIPTORS * DESCRIPTOR_LEN );

    SHOW_INFO(2, "rx descriptors at %p, phys 0x%x\n", r->rxdesc, r->rxdesc_phys);
    SHOW_INFO(2, "tx descriptors at %p, phys 0x%x\n", r->txdesc, r->txdesc_phys);

    hal_pv_alloc( &r->rxbuf_phys, (void**)&r->rxbuf, NUM_RX_DESCRIPTORS * BUFSIZE_PER_FRAME );
    hal_pv_alloc( &r->txbuf_phys, (void**)&r->txbuf, NUM_TX_DESCRIPTORS * BUFSIZE_PER_FRAME );

    /* create a receive sem */
    hal_sem_init( &r->rx_sem, "rtl8169 rx_sem");

    /* transmit sem */
    hal_sem_init(  &r->tx_sem, "rtl8169 tx_sem");

    /* reset the chip */
    int repeats = 100;
    RTL_WRITE_8(r, REG_CR, (1<<4)); // reset the chip, disable tx/rx
    do {
        hal_sleep_msec(10); // 10ms
        if(repeats -- <= 0 )
            break;
    } while(RTL_READ_8(r, REG_CR) & (1<<4));

    /* read in the mac address */
    r->mac_addr[0] = RTL_READ_8(r, REG_IDR0);
    r->mac_addr[1] = RTL_READ_8(r, REG_IDR1);
    r->mac_addr[2] = RTL_READ_8(r, REG_IDR2);
    r->mac_addr[3] = RTL_READ_8(r, REG_IDR3);
    r->mac_addr[4] = RTL_READ_8(r, REG_IDR4);
    r->mac_addr[5] = RTL_READ_8(r, REG_IDR5);
    SHOW_INFO(2, "rtl8169: mac addr %x:%x:%x:%x:%x:%x\n",
              r->mac_addr[0], r->mac_addr[1], r->mac_addr[2],
              r->mac_addr[3], r->mac_addr[4], r->mac_addr[5]);

    /* some voodoo from BSD driver */
    RTL_WRITE_16(r, REG_CCR, RTL_READ_16(r, REG_CCR));
    RTL_SETBITS_16(r, REG_CCR, 0x3);

    /* mask all interrupts */
    RTL_WRITE_16(r, REG_IMR, 0);

    /* set up the tx/rx descriptors */
    rtl8169_setup_descriptors(r);

    /* enable tx/rx */
    RTL_SETBITS_8(r, REG_CR, (1<<3)|(1<<2));

    /* set up the rx state */
    /* 1024 byte dma threshold, 1024 dma max burst, CRC calc 8 byte+, accept all packets */
    RTL_WRITE_32(r, REG_RCR, (1<<16) | (6<<13) | (6<<8) | (0xf << 0));
    RTL_SETBITS_16(r, REG_CCR, (1<<5)); // rx checksum enable
    RTL_WRITE_16(r, REG_RMS, 1518); // rx mtu

    /* set up the tx state */
    RTL_WRITE_32(r, REG_TCR, (RTL_READ_32(r, REG_TCR) & ~0x1ff) | (6<<8)); // 1024 max burst dma
    RTL_WRITE_8(r, REG_MTPS, 0x3f); // max tx packet size (must be careful to not actually transmit more than mtu)

    /* set up the interrupt handler */
    //int_set_io_interrupt_handler(r->irq, &rtl8169_int, r, "rtl8169");
    if(hal_irq_alloc( r->irq, &rtl8169_int, r, HAL_IRQ_SHAREABLE ))
    {
        SHOW_ERROR( 0, "unable to allocate irq %d", r->irq );
        goto err1;
    }

    /* clear all pending interrupts */
    RTL_WRITE_16(r, REG_ISR, 0xffff);

    /* unmask interesting interrupts */
    RTL_WRITE_16(r, REG_IMR, IMR_SYSERR | IMR_LINKCHG | IMR_TER | IMR_TOK | IMR_RER | IMR_ROK | IMR_RXOVL);

    return 0;

err1:
    // TODO free what?
    //vm_delete_region(vm_get_kernel_aspace_id(), r->region);
//err:
    return err;
}
Example #16
VOID
sndStartDMA(
    IN    PGLOBAL_DEVICE_INFO pGDI,
    IN    int PlayBack
)
/*++

Routine Description:

    Allocate the adapter channel (this had better not wait !)

Arguments:

    pGDI - Pointer to the global device data

Return Value:

    None

--*/
{
    ULONG DataLong;

    //
    // Test if DMA is already running
    //

    ASSERT(pGDI->DMABusy == FALSE);

    pGDI->DMABusy = TRUE;


    dprintf5("sndStartDMA()");

    //
    // Program the DMA hardware (isn't this a bit illegal ?)
    //

    DataLong = 0;

    ((PDMA_CHANNEL_MODE)(&DataLong))->AccessTime = ACCESS_200NS;

    if (pGDI->BytesPerSample == 1) {
        ((PDMA_CHANNEL_MODE)(&DataLong))->TransferWidth = WIDTH_8BITS;
    } else {
        ((PDMA_CHANNEL_MODE)(&DataLong))->TransferWidth = WIDTH_16BITS;
    }


    if (PlayBack){

        ((PDMA_CHANNEL_MODE)(&DataLong))->BurstMode = 0x01;

        WRITE_REGISTER_ULONG(&DMA_CONTROL->Channel[SOUND_CHANNEL_A].Mode.Long,
                         DataLong);
        WRITE_REGISTER_ULONG(&DMA_CONTROL->Channel[SOUND_CHANNEL_B].Mode.Long,
                         DataLong);

    } else {

        WRITE_REGISTER_ULONG(&DMA_CONTROL->Channel[SOUND_CHANNEL_A+2].Mode.Long,
                         DataLong);
        WRITE_REGISTER_ULONG(&DMA_CONTROL->Channel[SOUND_CHANNEL_B+2].Mode.Long,
                         DataLong);

    }

    //
    // Allocate an adapter channel.  When the system allocates
    // the channel, processing will continue in the sndProgramDMA
    // routine below.
    //


    if (PlayBack) {
        dprintf4("Allocating adapter channel (buffer = 0)");
        IoAllocateAdapterChannel(pGDI->pAdapterObject[0],
                             pGDI->pWaveOutDevObj,
                             BYTES_TO_PAGES(pGDI->DmaHalfBufferSize),
                             sndProgramDMA,
                             (PVOID)0);         // Context
    } else {
        dprintf4("Allocating adapter channel (buffer = 2)");
        IoAllocateAdapterChannel(pGDI->pAdapterObject[2],
                             pGDI->pWaveInDevObj,
                             BYTES_TO_PAGES(pGDI->DmaHalfBufferSize),
                             sndProgramDMA,
                             (PVOID)0);         // Context
    }

    //
    // Execution will continue in sndProgramDMA when the
    // adapter has been allocated
    //

}
Example #17
static grub_err_t
grub_cmd_linux (grub_command_t cmd __attribute__ ((unused)),
                int argc, char *argv[])
{
    grub_file_t file = 0;
    struct linux_kernel_header lh;
    grub_ssize_t len, start, filelen;
    void *kernel = NULL;

    grub_dl_ref (my_mod);

    if (argc == 0)
    {
        grub_error (GRUB_ERR_BAD_ARGUMENT, N_("filename expected"));
        goto fail;
    }

    file = grub_file_open (argv[0]);
    if (! file)
        goto fail;

    filelen = grub_file_size (file);

    kernel = grub_malloc(filelen);

    if (!kernel)
    {
        grub_error (GRUB_ERR_OUT_OF_MEMORY, N_("cannot allocate kernel buffer"));
        goto fail;
    }

    if (grub_file_read (file, kernel, filelen) != filelen)
    {
        grub_error (GRUB_ERR_FILE_READ_ERROR, N_("Can't read kernel %s"), argv[0]);
        goto fail;
    }

    grub_tpm_measure (kernel, filelen, GRUB_KERNEL_PCR);

    if (! grub_linuxefi_secure_validate (kernel, filelen))
    {
        grub_error (GRUB_ERR_INVALID_COMMAND, N_("%s has invalid signature"), argv[0]);
        grub_free (kernel);
        goto fail;
    }

    params = grub_efi_allocate_pages_max (0x3fffffff, BYTES_TO_PAGES(16384));

    if (! params)
    {
        grub_error (GRUB_ERR_OUT_OF_MEMORY, "cannot allocate kernel parameters");
        goto fail;
    }

    grub_memset (params, 0, 16384);

    grub_memcpy (&lh, kernel, sizeof (lh));

    if (lh.boot_flag != grub_cpu_to_le16 (0xaa55))
    {
        grub_error (GRUB_ERR_BAD_OS, N_("invalid magic number"));
        goto fail;
    }

    if (lh.setup_sects > GRUB_LINUX_MAX_SETUP_SECTS)
    {
        grub_error (GRUB_ERR_BAD_OS, N_("too many setup sectors"));
        goto fail;
    }

    if (lh.version < grub_cpu_to_le16 (0x020b))
    {
        grub_error (GRUB_ERR_BAD_OS, N_("kernel too old"));
        goto fail;
    }

    if (!lh.handover_offset)
    {
        grub_error (GRUB_ERR_BAD_OS, N_("kernel doesn't support EFI handover"));
        goto fail;
    }

    linux_cmdline = grub_efi_allocate_pages_max(0x3fffffff,
                    BYTES_TO_PAGES(lh.cmdline_size + 1));

    if (!linux_cmdline)
    {
        grub_error (GRUB_ERR_OUT_OF_MEMORY, N_("can't allocate cmdline"));
        goto fail;
    }

    grub_memcpy (linux_cmdline, LINUX_IMAGE, sizeof (LINUX_IMAGE));
    grub_create_loader_cmdline (argc, argv,
                                linux_cmdline + sizeof (LINUX_IMAGE) - 1,
                                lh.cmdline_size - (sizeof (LINUX_IMAGE) - 1));

    grub_pass_verity_hash(&lh, linux_cmdline);
    lh.cmd_line_ptr = (grub_uint32_t)(grub_uint64_t)linux_cmdline;

    handover_offset = lh.handover_offset;

    start = (lh.setup_sects + 1) * 512;
    len = grub_file_size(file) - start;

    kernel_mem = grub_efi_allocate_pages(lh.pref_address,
                                         BYTES_TO_PAGES(lh.init_size));

    if (!kernel_mem)
        kernel_mem = grub_efi_allocate_pages_max(0x3fffffff,
                     BYTES_TO_PAGES(lh.init_size));

    if (!kernel_mem)
    {
        grub_error (GRUB_ERR_OUT_OF_MEMORY, N_("can't allocate kernel"));
        goto fail;
    }

    grub_memcpy (kernel_mem, (char *)kernel + start, len);
    grub_loader_set (grub_linuxefi_boot, grub_linuxefi_unload, 0);
    loaded=1;

    lh.code32_start = (grub_uint32_t)(grub_uint64_t) kernel_mem;
    grub_memcpy (params, &lh, 2 * 512);

    params->type_of_loader = 0x21;

fail:

    if (file)
        grub_file_close (file);

    if (kernel)
        grub_free (kernel);

    if (grub_errno != GRUB_ERR_NONE)
    {
        grub_dl_unref (my_mod);
        loaded = 0;
    }

    if (linux_cmdline && !loaded)
        grub_efi_free_pages((grub_efi_physical_address_t)linux_cmdline, BYTES_TO_PAGES(lh.cmdline_size + 1));

    if (kernel_mem && !loaded)
        grub_efi_free_pages((grub_efi_physical_address_t)kernel_mem, BYTES_TO_PAGES(kernel_size));

    if (params && !loaded)
        grub_efi_free_pages((grub_efi_physical_address_t)params, BYTES_TO_PAGES(16384));

    return grub_errno;
}
Example #18
static grub_err_t
grub_cmd_initrd (grub_command_t cmd __attribute__ ((unused)),
                 int argc, char *argv[])
{
    grub_file_t *files = 0;
    int i, nfiles = 0;
    grub_size_t size = 0;
    grub_uint8_t *ptr;

    if (argc == 0)
    {
        grub_error (GRUB_ERR_BAD_ARGUMENT, N_("filename expected"));
        goto fail;
    }

    if (!loaded)
    {
        grub_error (GRUB_ERR_BAD_ARGUMENT, N_("you need to load the kernel first"));
        goto fail;
    }

    files = grub_zalloc (argc * sizeof (files[0]));
    if (!files)
        goto fail;

    for (i = 0; i < argc; i++)
    {
        grub_file_filter_disable_compression ();
        files[i] = grub_file_open (argv[i]);
        if (! files[i])
            goto fail;
        nfiles++;
        size += ALIGN_UP (grub_file_size (files[i]), 4);
    }

    initrd_mem = grub_efi_allocate_pages_max (0x3fffffff, BYTES_TO_PAGES(size));

    if (!initrd_mem)
    {
        grub_error (GRUB_ERR_OUT_OF_MEMORY, N_("can't allocate initrd"));
        goto fail;
    }

    params->ramdisk_size = size;
    params->ramdisk_image = (grub_uint32_t)(grub_uint64_t) initrd_mem;

    ptr = initrd_mem;

    for (i = 0; i < nfiles; i++)
    {
        grub_ssize_t cursize = grub_file_size (files[i]);
        if (grub_file_read (files[i], ptr, cursize) != cursize)
        {
            if (!grub_errno)
                grub_error (GRUB_ERR_FILE_READ_ERROR, N_("premature end of file %s"),
                            argv[i]);
            goto fail;
        }
        grub_tpm_measure (ptr, cursize, GRUB_INITRD_PCR);
        ptr += cursize;
        grub_memset (ptr, 0, ALIGN_UP_OVERHEAD (cursize, 4));
        ptr += ALIGN_UP_OVERHEAD (cursize, 4);
    }

    params->ramdisk_size = size;

fail:
    for (i = 0; i < nfiles; i++)
        grub_file_close (files[i]);
    grub_free (files);

    if (initrd_mem && grub_errno)
        grub_efi_free_pages((grub_efi_physical_address_t)initrd_mem, BYTES_TO_PAGES(size));

    return grub_errno;
}
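Each initrd file is padded to a 4-byte boundary before the next one is concatenated, both when summing sizes (ALIGN_UP) and when zero-filling the gap (ALIGN_UP_OVERHEAD). These macros come from GRUB's headers; as a sketch, the usual power-of-two forms are (assumed, not quoted from GRUB):

/* Sketch: round n up to a power-of-two boundary, and the padding needed. */
#define ALIGN_UP(n, align)          (((n) + (align) - 1) & ~((align) - 1))
#define ALIGN_UP_OVERHEAD(n, align) ((-(n)) & ((align) - 1))

For a 10-byte file and 4-byte alignment, ALIGN_UP gives 12 and ALIGN_UP_OVERHEAD gives 2, so two zero bytes are written after the file and ptr advances to the next 4-byte boundary.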
Example #19
NTSTATUS kdi_ConfigCallBack
(
/* INPUT PARAMETERS:  */

    dVoidPtr context,
    PUNICODE_STRING path_name,
    INTERFACE_TYPE bus_type,
    dUDWord bus_number,
    PKEY_VALUE_FULL_INFORMATION *bus_information,
    CONFIGURATION_TYPE controller_type,
    dUDWord controller_number,
    PKEY_VALUE_FULL_INFORMATION *controller_information,
    CONFIGURATION_TYPE peripheral_type,
    dUDWord peripheral_number,
    PKEY_VALUE_FULL_INFORMATION *peripheral_information

/* UPDATE PARAMETERS: */

/* OUTPUT PARAMETERS: */

)
/* COMMENTS: *****************************************************************
 *
 * Routine Description:
 *
 *    This routine is used to acquire all of the configuration
 *    information for each floppy disk controller and the
 *    peripheral driver attached to that controller.
 *
 * Arguments:
 *
 *    context - Pointer to the configuration information we are building
 *                    up.
 *
 *    path_name - unicode registry path.    Not Used.
 *
 *    bus_type - Internal, Isa, ...
 *
 *    bus_number - Which bus if we are on a multibus system.
 *
 *    bus_information - Configuration information about the bus. Not Used.
 *
 *    controller_type - Should always be DiskController.
 *
 *    controller_number - Which controller if there is more than one
 *                                controller in the system.
 *
 *    controller_information - Array of pointers to the three pieces of
 *                                    registry information.
 *
 *    peripheral_type - Should always be FloppyDiskPeripheral.
 *
 *    peripheral_number - Which floppy if this controller is maintaining
 *                                more than one.
 *
 *    peripheral_information - Array of pointers to the three pieces of
 *                                    registry information.
 *
 * Return Value:
 *
 *    STATUS_SUCCESS if everything went ok, or STATUS_INSUFFICIENT_RESOURCES
 *    if it couldn't map the base csr or acquire the adapter object, or
 *    all of the resource information couldn't be acquired.
 *
 * DEFINITIONS: *************************************************************/
{

/* DATA: ********************************************************************/

    /* So we don't have to typecast the context. */
    ConfigDataPtr config = context;

    /* Simple iteration variable. */
    dUDWord i;

    /* This boolean will be used to denote whether we've seen this */
    /* controller before. */
    dBoolean new_controller;

    /* This will be used to denote whether we even have room */
    /* for a new controller. */
    dBoolean out_of_room;

    /* Iteration variable that will end up indexing to where */
    /* the controller information should be placed. */
    dUDWord controller_slot;

    /* Short hand for referencing the particular controller config */
    /* information that we are building up. */
    ConfigControllerDataPtr controller;

#if !MULTI_CONTROLLER
    PCM_FULL_RESOURCE_DESCRIPTOR peripheral_data_ptr = (PCM_FULL_RESOURCE_DESCRIPTOR)
        (((dUBytePtr)peripheral_information[IoQueryDeviceConfigurationData]) +
        peripheral_information[IoQueryDeviceConfigurationData]->DataOffset);
#endif

    /* These three boolean will tell us whether we got all the */
    /* information that we needed. */
    dBoolean found_port = FALSE;
    dBoolean found_interrupt = FALSE;
    dBoolean found_dma = FALSE;

/* CODE: ********************************************************************/

    ASSERT(controller_type == DiskController);
#if !MULTI_CONTROLLER
    ASSERT(peripheral_type == FloppyDiskPeripheral);
#endif

    /* Loop through the "slots" that we have for a new controller. */
    /* Determine if this is a controller that we've already seen, */
    /* or a new controller. */

    out_of_room = dTRUE;
    for (
        controller_slot = 0;
        controller_slot < MAXIMUM_CONTROLLERS_PER_MACHINE;
        controller_slot++
        ) {

        if (config->controller[controller_slot].actual_controller_number == -1) {

            new_controller = dTRUE;
            out_of_room = dFALSE;
            config->controller[controller_slot].actual_controller_number =
            controller_number;
            config->number_of_controllers++;
            break;

        } else if (config->controller[controller_slot].actual_controller_number
                        == (LONG)controller_number) {

            new_controller = dFALSE;
            out_of_room = dFALSE;
            break;

        }

    }

    if (out_of_room) {

        /* Just return and ignore the controller. */

        return STATUS_SUCCESS;

    }

    controller = &config->controller[controller_slot];

    if (new_controller) {

        PCM_FULL_RESOURCE_DESCRIPTOR controller_data =
                (PCM_FULL_RESOURCE_DESCRIPTOR)
                (((dUBytePtr)controller_information[IoQueryDeviceConfigurationData]) +
                controller_information[IoQueryDeviceConfigurationData]->DataOffset);

        /* We have the pointer. Save off the interface type and */
        /* the busnumber for use when we call the Hal and the */
        /* Io System. */

        controller->interface_type = bus_type;
        controller->bus_number = bus_number;
        controller->sharable_vector = dTRUE;
        controller->save_float_state = dFALSE;

        /* We need to get the following information out of the partial */
        /* resource descriptors. */

        /* The irql and vector. */

        /* The dma channel. */

        /* The base address and span covered by the floppy controllers */
        /* registers. */

        /* It is not defined how these appear in the partial resource */
        /* lists, so we will just loop over all of them.    If we find */
        /* something we don't recognize, we drop that information on */
        /* the floor.    When we have finished going through all the */
        /* partial information, we validate that we got the above */
        /* three. */

        kdi_CheckedDump(QIC117INFO,
                        "Q117i: path: %ls\n",
                        (ULONG)path_name->Buffer);
        kdi_CheckedDump(QIC117INFO,
                        "Q117i: adding controller: %08x\n",
                        controller_number);
        kdi_CheckedDump(QIC117INFO,
                        "Q117i: adding controller slot: %08x\n",
                        controller_slot);

        for (
            i = 0;
            i < controller_data->PartialResourceList.Count;
            i++
            ) {

            PCM_PARTIAL_RESOURCE_DESCRIPTOR partial =
                &controller_data->PartialResourceList.PartialDescriptors[i];

            switch (partial->Type) {

                case CmResourceTypePort: {

                    dBoolean in_io_space = !!partial->Flags;
                    found_port = dTRUE;

                    /* Save of the pointer to the partial so */
                    /* that we can later use it to report resources */
                    /* and we can also use this later in the routine */
                    /* to make sure that we got all of our resources. */

                    controller->span_of_controller_address =
                            partial->u.Port.Length;
                    controller->original_base_address =
                            partial->u.Port.Start;
                    controller->controller_base_address =
                            kdi_GetControllerBase(
                                bus_type,
                                bus_number,
                                partial->u.Port.Start,
                                controller->span_of_controller_address,
                                in_io_space,
                                &controller->mapped_address
                                );

                    if (!controller->controller_base_address) {

                            return STATUS_INSUFFICIENT_RESOURCES;

                    }

                    break;
                }
                case CmResourceTypeInterrupt: {

                    found_interrupt = dTRUE;

                    if (partial->Flags & CM_RESOURCE_INTERRUPT_LATCHED) {

                        controller->interrupt_mode = Latched;

                    } else {

                        controller->interrupt_mode = LevelSensitive;

                    }

                    controller->original_irql =  partial->u.Interrupt.Level;
                    controller->original_vector = partial->u.Interrupt.Vector;
                    controller->controller_vector =
                            HalGetInterruptVector(
                                bus_type,
                                bus_number,
                                partial->u.Interrupt.Level,
                                partial->u.Interrupt.Vector,
                                &controller->controller_irql,
                                &controller->processor_mask
                                );

                    break;
                }
                case CmResourceTypeDma: {

                    DEVICE_DESCRIPTION device_desc;

                    RtlZeroMemory(&device_desc,sizeof(device_desc));
                    found_dma = dTRUE;

                    controller->original_dma_channel = partial->u.Dma.Channel;

                    device_desc.Version = DEVICE_DESCRIPTION_VERSION;
                    if (partial->u.Dma.Channel > 3) {
                        device_desc.DmaWidth = Width16Bits;
                    } else {
                        device_desc.DmaWidth = Width8Bits;
                    }
                    device_desc.DemandMode = dTRUE;
                    device_desc.MaximumLength = 32l*1024l;
                    device_desc.AutoInitialize = dFALSE;
                    device_desc.ScatterGather = dFALSE;
                    device_desc.DmaChannel = partial->u.Dma.Channel;
                    device_desc.InterfaceType = bus_type;
                    device_desc.DmaSpeed = TypeA;
                    controller->number_of_map_registers = BYTES_TO_PAGES(32l*1024l);
                    controller->adapter_object =
                            HalGetAdapter(
                                &device_desc,
                                &controller->number_of_map_registers
                                );

                    kdi_CheckedDump(QIC117INFO,
                                    "Q117i: Bus Type = %08x\n",
                                    bus_type);

                    kdi_CheckedDump(QIC117INFO,
                                    "Q117i: Number of map registers = %08x\n",
                                    controller->number_of_map_registers );

                    if (!controller->adapter_object) {

                            return STATUS_INSUFFICIENT_RESOURCES;

                    }

                    break;

                }
                default: {

                    break;

                }

            }

        }

        /* If we didn't get all the information then we return */
        /* insufficient resources. */

        if ((!found_port) ||
            (!found_interrupt) ||
            (!found_dma)) {

            return STATUS_INSUFFICIENT_RESOURCES;

        }
        controller->number_of_tape_drives++;
        controller->ok_to_use_this_controller = dTRUE;

        {
            //
            // Get extra information about the floppy controller
            //

            RTL_QUERY_REGISTRY_TABLE    paramTable[2];
            ULONG apiSupported;
            WCHAR idstr[200];
            UNICODE_STRING str;

            str.Length = 0;
            str.MaximumLength = 200;
            str.Buffer = idstr;

            RtlZeroMemory(&paramTable[0], sizeof(paramTable));

            paramTable[0].Flags = RTL_QUERY_REGISTRY_DIRECT;
            paramTable[0].Name = L"APISupported";
            paramTable[0].EntryContext = &str;
            paramTable[0].DefaultType = REG_SZ;
            paramTable[0].DefaultData = L"";
            paramTable[0].DefaultLength = sizeof(WCHAR);


            if (!NT_SUCCESS(RtlQueryRegistryValues(
                RTL_REGISTRY_ABSOLUTE | RTL_REGISTRY_OPTIONAL,
                path_name->Buffer, &paramTable[0], NULL, NULL)))
            {
                str.Buffer[0] = 0;
            }

            controller->controller_data.clk_48mhz = FALSE;
            controller->controller_data.floppyEnablerApiSupported = FALSE;

            if (str.Buffer[0] != 0) {
                NTSTATUS ntStatus;
                PFILE_OBJECT file;      // file object is not needed,  but returned by API

                kdi_CheckedDump(QIC117INFO,
                       "Q117i: Got registry setting for EnablerAPI = %ls\n",
                        (ULONG)str.Buffer );

                ntStatus = IoGetDeviceObjectPointer(
                                &str,
                                FILE_READ_ACCESS,
                                &file,
                                &controller->controller_data.apiDeviceObject);

                if (NT_SUCCESS(ntStatus)) {
                    FDC_INFORMATION info;

                    controller->controller_data.floppyEnablerApiSupported = TRUE;

                    //
                    // set the DMA direction to unknown,  thereby forcing a
                    // call to set the direction
                    //
                    controller->controller_data.dmaDirection = 0xff;

                    ntStatus = kdi_FloppyEnabler(
                                    controller->controller_data.apiDeviceObject,
                                    IOCTL_GET_FDC_INFO, &info);

                    //
                    //  We got the info for the FDC,  now check for a 48MHz clock
                    //
                    if (NT_SUCCESS(ntStatus)) {
                        controller->controller_data.clk_48mhz =
                            (info.ClockRatesSupported == FDC_CLOCK_48MHZ);
                    }


                } else {
                    kdi_CheckedDump(QIC117DBGP,
                       "Q117i: Got registry setting for EnablerAPI = %ls but failed to open channel to device\n",
                        (ULONG)str.Buffer );
                }
            }

        }

    }

    kdi_CheckedDump(QIC117INFO,
            "Q117i: Got setting for 48mhz clock setting = %x\n",
            controller->controller_data.clk_48mhz );



    return STATUS_SUCCESS;
}
Example #20
ZVMSTATUS ZVMAPI HvmSubvertCpu (
    void * GuestRsp
)
{ // Unfinished!!!!! The most important
    PCPU Cpu; // It will be used as the hypervisor struct.
    void * HostKernelStackBase;
    ZVMSTATUS Status;
    ZION_PHYSICAL_ADDRESS HostStackPA;

    // allocate memory for host stack, 16 * 4k
    HostKernelStackBase = MmAllocPages(HOST_STACK_SIZE_IN_PAGES, (uint32_t *)&HostStackPA);

    if (!HostKernelStackBase)
    {
        cprintf("HvmSubvertCpu(): Failed to allocate %d pages for the host stack\n", HOST_STACK_SIZE_IN_PAGES);
        //return STATUS_INSUFFICIENT_RESOURCES;
        return -1;
    }

    // unchecked -8 or -4 ?
    Cpu = (PCPU) ((char *) HostKernelStackBase + HOST_STACK_SIZE_IN_PAGES * Zion_PageSize - 4 - sizeof (CPU));
    Cpu->HostStack = HostKernelStackBase;

    // for interrupt handlers which will address CPU through the FS
    Cpu->SelfPointer = Cpu;

    Cpu->ProcessorNumber = 0;
    //Cpu->ProcessorNumber = KeGetCurrentProcessorNumber();
    //Cpu->ProcessorNumber = NumberOfProcessors;

    //Cpu->Nested = FALSE;

    InitializeListHead (&Cpu->GeneralTrapsList);
    InitializeListHead (&Cpu->MsrTrapsList);
    //InitializeListHead (&Cpu->IoTrapsList);

    Cpu->GdtArea = (PSEGMENT_DESCRIPTOR)MmAllocPages(BYTES_TO_PAGES(BP_GDT_LIMIT), NULL); // Currently we create our own GDT and IDT area
    if (!Cpu->GdtArea)
    {
        cprintf(("HvmSubvertCpu(): Failed to allocate memory for GDT\n"));
        //return STATUS_INSUFFICIENT_RESOURCES;
        return -1;
    }

    Cpu->IdtArea = (PSEGMENT_DESCRIPTOR)MmAllocPages(BYTES_TO_PAGES(BP_IDT_LIMIT), NULL);
    if (!Cpu->IdtArea)
    {
        cprintf(("HvmSubvertCpu(): Failed to allocate memory for IDT\n"));
        //return STATUS_INSUFFICIENT_RESOURCES;
        return -1;
    }

    Status = Hvm->ArchRegisterTraps(Cpu); //<----------------3.1 Finish
    ///Status = g_HvmControl->ApplyTraps(Cpu);
    if (!ZVM_SUCCESS(Status))
    {
        cprintf("Helloworld:HvmSubvertCpu(): Failed to register NewBluePill traps, status 0x%08hX\n", Status);
        //return EFI_LOAD_ERROR;
        return -1;
    }

    Status = Hvm->ArchInitialize (Cpu, (void *)&CmSlipIntoMatrix, GuestRsp); //<----------------3.2 Finish
    //Status = Hvm->ArchInitialize (Cpu, (PVOID) (UINTN)CmSlipIntoMatrix, GuestRsp);   // Can't use CmSlipIntoMatrix by PVOID
    if (!ZVM_SUCCESS (Status))
    {
        cprintf("Helloworld:HvmSubvertCpu(): ArchInitialize() failed with status 0x%08hX\n", Status);
        return Status;
    }

    // no API calls allowed below this point: we have overloaded GDTR and selectors
    // unchecked

    HvmSetupGdt (Cpu); //<----------------3.3 Finish
    HvmSetupIdt (Cpu); //<----------------3.4 Finish

    Status = Hvm->ArchVirtualize(Cpu); //<----------------3.5 Finish

    cprintf("Wrong again...\n");
    return Status;
}
Example #21
BOOLEAN CreateDMA(PDEVICE_OBJECT DeviceObject)
{
    DEVICE_DESCRIPTION Desc;
    ULONG MappedRegs = 0;
    PDEVICE_EXTENSION Device = DeviceObject->DeviceExtension;
    KEVENT DMAEvent;
    KIRQL OldIrql;

    // Buffersize should already be set but it isn't yet !
    Device->BufferSize = SB_BUFSIZE;
    DPRINT("Bufsize == %u\n", Device->BufferSize);

    RtlZeroMemory(&Desc, sizeof(DEVICE_DESCRIPTION));

    // Init memory!
    Desc.Version = DEVICE_DESCRIPTION_VERSION;
    Desc.Master = FALSE;    // Slave
    Desc.ScatterGather = FALSE; // Don't think so anyway
    Desc.DemandMode = FALSE;    // == !SingleModeDMA
    Desc.AutoInitialize = TRUE; // ?
    Desc.Dma32BitAddresses = FALSE; // I don't think we can
    Desc.IgnoreCount = FALSE; // Should be OK
    Desc.Reserved1 = 0;
//    Desc.Reserved2 = 0;
    Desc.BusNumber = 0;
    Desc.DmaChannel = Device->DMA;    // Our channel :)
    Desc.InterfaceType = Isa;   // (BusType == MicroChannel) ? MicroChannel : Isa;
    Desc.DmaWidth = 0;    // hmm... 8 bits?
    Desc.DmaSpeed = 0;     // double hmm (Compatible it should be)
    Desc.MaximumLength = Device->BufferSize;
//    Desc.MinimumLength = 0;
    Desc.DmaPort = 0;

    DPRINT("Calling HalGetAdapter(), asking for %d mapped regs\n", MappedRegs);

    Device->Adapter = HalGetAdapter(&Desc, &MappedRegs);

    DPRINT("Called\n");

    if (! Device->Adapter)
    {
        DPRINT("HalGetAdapter() FAILED\n");
        return FALSE;
    }

    DPRINT("Bufsize == %u\n", Device->BufferSize);

    if (MappedRegs < BYTES_TO_PAGES(Device->BufferSize))
    {
        DPRINT("Could only allocate %u mapping registers\n", MappedRegs);

        if (MappedRegs == 0)
            return FALSE;

        Device->BufferSize = MappedRegs * PAGE_SIZE;
        DPRINT("Bufsize == %u\n", Device->BufferSize);
    }

    DPRINT("Allocated %u mapping registers\n", MappedRegs);

    // Check if we already have memory here...

    // Check to make sure we're >= minimum

    DPRINT("Allocating buffer\n");

    DPRINT("Bufsize == %u\n", Device->BufferSize);

    Device->VirtualBuffer = HalAllocateCommonBuffer(Device->Adapter, Device->BufferSize,
                                                &Device->Buffer, FALSE);

    // For some reason BufferSize == 0 here?!
//    DPRINT("Buffer == 0x%x Bufsize == %u\n", Device->Buffer, Device->BufferSize);
    DPRINT("Bufsize == %u,", Device->BufferSize);
    DPRINT("Buffer == 0x%x\n", Device->Buffer);

    if (! Device->VirtualBuffer)
    {
        DPRINT("Could not allocate buffer :(\n");
        // should try again with smaller buffer...
        return FALSE;
    }

//    DPRINT("Buffer == 0x%x Bufsize == %u\n", Device->Buffer, Device->BufferSize);
    DPRINT("Bufsize == %u,", Device->BufferSize);
    DPRINT("Buffer == 0x%x\n", Device->Buffer);

    DPRINT("Calling IoAllocateMdl()\n");
    Device->Mdl = IoAllocateMdl(Device->VirtualBuffer, Device->BufferSize, FALSE, FALSE, NULL);
    DPRINT("Bufsize == %u\n", Device->BufferSize);

    // IS THIS RIGHT:
    if (! Device->Mdl)
    {
        DPRINT("IoAllocateMdl() FAILED\n");
        // Free the HAL buffer
        return FALSE;
    }

    DPRINT("VBuffer == 0x%x Mdl == %u Bufsize == %u\n", Device->VirtualBuffer, Device->Mdl, Device->BufferSize);

    DPRINT("Calling MmBuildMdlForNonPagedPool\n");
    MmBuildMdlForNonPagedPool(Device->Mdl);

    DPRINT("Bufsize == %u\n", Device->BufferSize);

    // part II:
    KeInitializeEvent(&DMAEvent, SynchronizationEvent, FALSE);
    // Raise IRQL
    KeRaiseIrql(DISPATCH_LEVEL,&OldIrql);
    IoAllocateAdapterChannel(Device->Adapter, DeviceObject,
                            BYTES_TO_PAGES(Device->BufferSize),
                            SoundProgramDMA, &DMAEvent);
    // Lower IRQL
    KeLowerIrql(OldIrql);
    DPRINT("VBuffer == 0x%x Bufsize == %u\n", Device->VirtualBuffer, Device->BufferSize);
    KeWaitForSingleObject(&DMAEvent, Executive, KernelMode, FALSE, NULL);


//    if (MappedRegs == 0)
//        MappedRegs = 2;
//    else
//        MappedRegs ++;


//    Status = IoAllocateAdapterChannel(
//                    Adapter,
//                    DeviceObject,
//                    MappedRegs,
//                    CALLBACK,
//                    DeviceObject); // Context
    return TRUE;
}
Example #22
                            0xf1f1f1f1);
#endif

        RtlCopyMemory (&InPageSupport->Mdl, Mdl, Mdl->Size);

        FreeMdl = Mdl;

        Mdl = &InPageSupport->Mdl;

        ASSERT (((ULONG_PTR)Mdl & (sizeof(QUAD) - 1)) == 0);
        InPageSupport->u1.e1.PrefetchMdlHighBits = ((ULONG_PTR)Mdl >> 3);
    }

    ASSERT (MdlPages != 0);

    ASSERT (Mdl->Size - sizeof(MDL) == BYTES_TO_PAGES(Mdl->ByteCount) * sizeof(PFN_NUMBER));

    DummyPfn1->u3.e2.ReferenceCount =
        (USHORT)(DummyPfn1->u3.e2.ReferenceCount - NumberOfPagesNeedingIo);
    
    //
    // Unlock page containing prototype PTEs.
    //

    if (PfnProto != NULL) {
        ASSERT (PfnProto->u3.e2.ReferenceCount > 1);
        MI_REMOVE_LOCKED_PAGE_CHARGE_AND_DECREF (PfnProto);
    }

    UNLOCK_PFN (OldIrql);
Example #23
NTSTATUS
PLxInitializeDeviceExtension(
    IN PDEVICE_EXTENSION DevExt
    )
/*++
Routine Description:

    This routine is called by EvtDeviceAdd. Here the device context is
    initialized and all the software resources required by the device are
    allocated.

Arguments:

    DevExt     Pointer to the Device Extension

Return Value:

     NTSTATUS

--*/
{
    NTSTATUS    status;
    ULONG       dteCount;
    WDF_IO_QUEUE_CONFIG  queueConfig;

    PAGED_CODE();

    //
    // Set Maximum Transfer Length (which must be less than the SRAM size).
    //
    DevExt->MaximumTransferLength = PCI9656_MAXIMUM_TRANSFER_LENGTH;
    if(DevExt->MaximumTransferLength > PCI9656_SRAM_SIZE) {
        DevExt->MaximumTransferLength = PCI9656_SRAM_SIZE;
    }

    TraceEvents(TRACE_LEVEL_INFORMATION, DBG_PNP,
                "MaximumTransferLength %d", DevExt->MaximumTransferLength);

    //
    // Calculate the number of DMA_TRANSFER_ELEMENTS + 1 needed to
    // support the MaximumTransferLength.
    //
    dteCount = BYTES_TO_PAGES((ULONG) ROUND_TO_PAGES(
        DevExt->MaximumTransferLength) + PAGE_SIZE);

    TraceEvents(TRACE_LEVEL_INFORMATION, DBG_PNP, "Number of DTEs %d", dteCount);

    //
    // Set the number of DMA_TRANSFER_ELEMENTs (DTE) to be available.
    //
    DevExt->WriteTransferElements = dteCount;
    DevExt->ReadTransferElements  = dteCount;

    //
    // The PCI9656 has two DMA Channels. This driver will use DMA Channel 0
    // as the "ToDevice" channel (Writes) and DMA Channel 1 as the
    // "From Device" channel (Reads).
    //
    // In order to support "duplex" DMA operation (the ability to have
    // concurrent reads and writes) two Dispatch Queues are created:
    // one for the Write (ToDevice) requests and another for the Read
    // (FromDevice) requests.  While each Dispatch Queue will operate
    // independently of the other, the requests within a given Dispatch
    // Queue will be serialized. This is because the hardware can only process
    // one request per DMA Channel at a time.
    //


    //
    // Setup a queue to handle only IRP_MJ_WRITE requests in Sequential
    // dispatch mode. This mode ensures there is only one write request
    // outstanding in the driver at any time. Framework will present the next
    // request only if the current request is completed.
    // Since we have configured the queue to dispatch all the specific requests
    // we care about, we don't need a default queue.  A default queue is
    // used to receive requests that are not preconfigured to go to
    // a specific queue.
    //
    WDF_IO_QUEUE_CONFIG_INIT ( &queueConfig,
                              WdfIoQueueDispatchSequential);

    queueConfig.EvtIoWrite = PLxEvtIoWrite;

    //
    // Static Driver Verifier (SDV) displays a warning if it doesn't find the 
    // EvtIoStop callback on a power-managed queue. The 'assume' below lets 
    // SDV know not to worry about the EvtIoStop.
    // If not explicitly set, the framework creates power-managed queues when 
    // the device is not a filter driver.  Normally the EvtIoStop is required
    // for power-managed queues, but for this driver it is not need b/c the 
    // driver doesn't hold on to the requests for long time or forward them to
    // other drivers. 
    // If the EvtIoStop callback is not implemented, the framework 
    // waits for all in-flight (driver owned) requests to be done before 
    // moving the device in the Dx/sleep states or before removing the device,
    // which is the correct behavior for this type of driver.
    // If the requests were taking an undetermined amount of time to complete,
    // or the requests were forwarded to a lower driver/another stack, the 
    // queue should have an EvtIoStop/EvtIoResume.
    //
    __analysis_assume(queueConfig.EvtIoStop != 0);
    status = WdfIoQueueCreate( DevExt->Device,
                               &queueConfig,
                               WDF_NO_OBJECT_ATTRIBUTES,
                               &DevExt->WriteQueue );
    __analysis_assume(queueConfig.EvtIoStop == 0);
    
    if(!NT_SUCCESS(status)) {
        TraceEvents(TRACE_LEVEL_ERROR, DBG_PNP,
                    "WdfIoQueueCreate failed: %!STATUS!", status);
        return status;
    }

    //
    // Set the Write Queue forwarding for IRP_MJ_WRITE requests.
    //
    status = WdfDeviceConfigureRequestDispatching( DevExt->Device,
                                       DevExt->WriteQueue,
                                       WdfRequestTypeWrite);

    if(!NT_SUCCESS(status)) {
        TraceEvents(TRACE_LEVEL_ERROR, DBG_PNP,
                    "DeviceConfigureRequestDispatching failed: %!STATUS!", status);
        return status;
    }


    //
    // Create a new IO Queue for IRP_MJ_READ requests in sequential mode.
    //
    WDF_IO_QUEUE_CONFIG_INIT( &queueConfig,
                              WdfIoQueueDispatchSequential);

    queueConfig.EvtIoRead = PLxEvtIoRead;

    //
    // By default, Static Driver Verifier (SDV) displays a warning if it 
    // doesn't find the EvtIoStop callback on a power-managed queue. 
    // The 'assume' below causes SDV to suppress this warning. If the driver 
    // has not explicitly set PowerManaged to WdfFalse, the framework creates
    // power-managed queues when the device is not a filter driver.  Normally 
    // the EvtIoStop is required for power-managed queues, but for this driver
    // it is not needed because the driver doesn't hold on to requests for a
    // long time or forward them to other drivers.
    // If the EvtIoStop callback is not implemented, the framework waits for
    // all driver-owned requests to be done before moving into the Dx/sleep
    // states or before removing the device, which is the correct behavior 
    // for this type of driver. If the requests were taking an indeterminate
    // amount of time to complete, or if the driver forwarded the requests
    // to a lower driver/another stack, the queue should have an 
    // EvtIoStop/EvtIoResume.
    //
    __analysis_assume(queueConfig.EvtIoStop != 0);
    status = WdfIoQueueCreate( DevExt->Device,
                               &queueConfig,
                               WDF_NO_OBJECT_ATTRIBUTES,
                               &DevExt->ReadQueue );
    __analysis_assume(queueConfig.EvtIoStop == 0);
    
    if(!NT_SUCCESS(status)) {
        TraceEvents(TRACE_LEVEL_ERROR, DBG_PNP,
                    "WdfIoQueueCreate failed: %!STATUS!", status);
        return status;
    }

    //
    // Set the Read Queue forwarding for IRP_MJ_READ requests.
    //
    status = WdfDeviceConfigureRequestDispatching( DevExt->Device,
                                       DevExt->ReadQueue,
                                       WdfRequestTypeRead);

    if(!NT_SUCCESS(status)) {
        TraceEvents(TRACE_LEVEL_ERROR, DBG_PNP,
                    "DeviceConfigureRequestDispatching failed: %!STATUS!", status);
        return status;
    }


    //
    // Create a WDFINTERRUPT object.
    //
    status = PLxInterruptCreate(DevExt);

    if (!NT_SUCCESS(status)) {
        return status;
    }

    status = PLxInitializeDMA( DevExt );

    if (!NT_SUCCESS(status)) {
        return status;
    }

    return status;
}
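
The routine above deliberately skips a default queue. For contrast, a hedged sketch of what a catch-all default queue for this device could look like; PLxEvtIoDeviceControl and the exact PDEVICE_EXTENSION layout are assumptions for illustration, not part of the original sample.

static NTSTATUS
PLxCreateDefaultQueueSketch(IN PDEVICE_EXTENSION DevExt)
{
    WDF_IO_QUEUE_CONFIG queueConfig;
    WDFQUEUE queue;

    //
    // A default queue receives every request not explicitly routed to
    // another queue, so no WdfDeviceConfigureRequestDispatching call is made.
    //
    WDF_IO_QUEUE_CONFIG_INIT_DEFAULT_QUEUE(&queueConfig,
                                           WdfIoQueueDispatchParallel);
    queueConfig.EvtIoDeviceControl = PLxEvtIoDeviceControl; // hypothetical handler

    return WdfIoQueueCreate(DevExt->Device,
                            &queueConfig,
                            WDF_NO_OBJECT_ATTRIBUTES,
                            &queue);
}
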
Exemple #24
0
/*
 * @implemented
 */
PVOID
NTAPI
MmAllocateNonCachedMemory(IN SIZE_T NumberOfBytes)
{
    PFN_COUNT PageCount, MdlPageCount;
    PFN_NUMBER PageFrameIndex;
    PHYSICAL_ADDRESS LowAddress, HighAddress, SkipBytes;
    MI_PFN_CACHE_ATTRIBUTE CacheAttribute;
    PMDL Mdl;
    PVOID BaseAddress;
    PPFN_NUMBER MdlPages;
    PMMPTE PointerPte;
    MMPTE TempPte;

    //
    // Get the page count
    //
    ASSERT(NumberOfBytes != 0);
    PageCount = (PFN_COUNT)BYTES_TO_PAGES(NumberOfBytes);

    //
    // Use the MDL allocator for simplicity, so setup the parameters
    //
    LowAddress.QuadPart = 0;
    HighAddress.QuadPart = -1;
    SkipBytes.QuadPart = 0;
    CacheAttribute = MiPlatformCacheAttributes[0][MmNonCached];

    //
    // Now call the MDL allocator
    //
    Mdl = MiAllocatePagesForMdl(LowAddress,
                                HighAddress,
                                SkipBytes,
                                NumberOfBytes,
                                CacheAttribute,
                                0);
    if (!Mdl) return NULL;

    //
    // Get the MDL VA and check how many pages we got (could be partial)
    //
    BaseAddress = (PVOID)((ULONG_PTR)Mdl->StartVa + Mdl->ByteOffset);
    MdlPageCount = ADDRESS_AND_SIZE_TO_SPAN_PAGES(BaseAddress, Mdl->ByteCount);
    if (PageCount != MdlPageCount)
    {
        //
        // Unlike MDLs, partial isn't okay for a noncached allocation, so fail
        //
        ASSERT(PageCount > MdlPageCount);
        MmFreePagesFromMdl(Mdl);
        ExFreePoolWithTag(Mdl, TAG_MDL);
        return NULL;
    }

    //
    // Allocate system PTEs for the base address
    // We use an extra page to store the actual MDL pointer for the free later
    //
    PointerPte = MiReserveSystemPtes(PageCount + 1, SystemPteSpace);
    if (!PointerPte)
    {
        //
        // Out of memory...
        //
        MmFreePagesFromMdl(Mdl);
        ExFreePoolWithTag(Mdl, TAG_MDL);
        return NULL;
    }

    //
    // Store the MDL pointer
    //
    *(PMDL*)PointerPte++ = Mdl;

    //
    // Okay, now see what range we got
    //
    BaseAddress = MiPteToAddress(PointerPte);

    //
    // This is our array of pages
    //
    MdlPages = (PPFN_NUMBER)(Mdl + 1);

    //
    // Setup the template PTE
    //
    TempPte = ValidKernelPte;

    //
    // Now check what kind of caching we should use
    //
    switch (CacheAttribute)
    {
        case MiNonCached:

            //
            // Disable caching
            //
            MI_PAGE_DISABLE_CACHE(&TempPte);
            MI_PAGE_WRITE_THROUGH(&TempPte);
            break;

        case MiWriteCombined:

            //
            // Enable write combining
            //
            MI_PAGE_DISABLE_CACHE(&TempPte);
            MI_PAGE_WRITE_COMBINED(&TempPte);
            break;

        default:
            //
            // Nothing to do
            //
            break;
    }

    //
    // Now loop the MDL pages
    //
    do
    {
        //
        // Get the PFN
        //
        PageFrameIndex = *MdlPages++;

        //
        // Set the PFN in the page and write it
        //
        TempPte.u.Hard.PageFrameNumber = PageFrameIndex;
        MI_WRITE_VALID_PTE(PointerPte++, TempPte);
    } while (--PageCount);

    //
    // Return the base address
    //
    return BaseAddress;
}
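
The allocation above reserves one extra system PTE and stashes the MDL pointer in it, just below the returned address. Below is a hedged sketch of the matching free path under that bookkeeping scheme; it is modeled on the code above, not quoted from ReactOS's MmFreeNonCachedMemory.

VOID
NTAPI
SketchFreeNonCachedMemory(IN PVOID BaseAddress,
                          IN SIZE_T NumberOfBytes)
{
    PFN_COUNT PageCount = (PFN_COUNT)BYTES_TO_PAGES(NumberOfBytes);
    PMMPTE PointerPte;
    PMDL Mdl;

    //
    // Step back one PTE: the allocator stored the MDL pointer there
    //
    PointerPte = MiAddressToPte(BaseAddress) - 1;
    Mdl = *(PMDL*)PointerPte;

    //
    // Release the pages, the MDL, and the PTE range (pages + bookkeeping slot)
    //
    MmFreePagesFromMdl(Mdl);
    ExFreePoolWithTag(Mdl, TAG_MDL);
    MiReleaseSystemPtes(PointerPte, PageCount + 1, SystemPteSpace);
}
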
Exemple #25
0
VOID
NTAPI
INIT_FUNCTION
MiInitializeNonPagedPool(VOID)
{
    ULONG i;
    PFN_COUNT PoolPages;
    PMMFREE_POOL_ENTRY FreeEntry, FirstEntry;
    PMMPTE PointerPte;
    PAGED_CODE();

    //
    // Initialize the pool S-LISTs as well as their maximum count. In general,
    // we'll allow 8 times the default on a 2GB system, and two times the default
    // on a 1GB system.
    //
    InitializeSListHead(&MiPagedPoolSListHead);
    InitializeSListHead(&MiNonPagedPoolSListHead);
    if (MmNumberOfPhysicalPages >= ((2 * _1GB) / PAGE_SIZE))
    {
        MiNonPagedPoolSListMaximum *= 8;
        MiPagedPoolSListMaximum *= 8;
    }
    else if (MmNumberOfPhysicalPages >= (_1GB / PAGE_SIZE))
    {
        MiNonPagedPoolSListMaximum *= 2;
        MiPagedPoolSListMaximum *= 2;
    }

    //
    // However if debugging options for the pool are enabled, turn off the S-LIST
    // to reduce the risk of messing things up even more
    //
    if (MmProtectFreedNonPagedPool)
    {
        MiNonPagedPoolSListMaximum = 0;
        MiPagedPoolSListMaximum = 0;
    }

    //
    // We keep 4 lists of free pages (4 lists help avoid contention)
    //
    for (i = 0; i < MI_MAX_FREE_PAGE_LISTS; i++)
    {
        //
        // Initialize each of them
        //
        InitializeListHead(&MmNonPagedPoolFreeListHead[i]);
    }

    //
    // Calculate how many pages the initial nonpaged pool has
    //
    PoolPages = (PFN_COUNT)BYTES_TO_PAGES(MmSizeOfNonPagedPoolInBytes);
    MmNumberOfFreeNonPagedPool = PoolPages;

    //
    // Initialize the first free entry
    //
    FreeEntry = MmNonPagedPoolStart;
    FirstEntry = FreeEntry;
    FreeEntry->Size = PoolPages;
    FreeEntry->Signature = MM_FREE_POOL_SIGNATURE;
    FreeEntry->Owner = FirstEntry;

    //
    // Insert it into the last list
    //
    InsertHeadList(&MmNonPagedPoolFreeListHead[MI_MAX_FREE_PAGE_LISTS - 1],
                   &FreeEntry->List);

    //
    // Now create free entries for every single other page
    //
    while (PoolPages-- > 1)
    {
        //
        // Link them all back to the original entry
        //
        FreeEntry = (PMMFREE_POOL_ENTRY)((ULONG_PTR)FreeEntry + PAGE_SIZE);
        FreeEntry->Owner = FirstEntry;
        FreeEntry->Signature = MM_FREE_POOL_SIGNATURE;
    }

    //
    // Validate and remember first allocated pool page
    //
    PointerPte = MiAddressToPte(MmNonPagedPoolStart);
    ASSERT(PointerPte->u.Hard.Valid == 1);
    MiStartOfInitialPoolFrame = PFN_FROM_PTE(PointerPte);

    //
    // Keep track of where initial nonpaged pool ends
    //
    MmNonPagedPoolEnd0 = (PVOID)((ULONG_PTR)MmNonPagedPoolStart +
                                 MmSizeOfNonPagedPoolInBytes);

    //
    // Validate and remember last allocated pool page
    //
    PointerPte = MiAddressToPte((PVOID)((ULONG_PTR)MmNonPagedPoolEnd0 - 1));
    ASSERT(PointerPte->u.Hard.Valid == 1);
    MiEndOfInitialPoolFrame = PFN_FROM_PTE(PointerPte);

    //
    // Validate the first nonpaged pool expansion page (which is a guard page)
    //
    PointerPte = MiAddressToPte(MmNonPagedPoolExpansionStart);
    ASSERT(PointerPte->u.Hard.Valid == 0);

    //
    // Calculate the size of the expansion region alone
    //
    MiExpansionPoolPagesInitialCharge = (PFN_COUNT)
                                        BYTES_TO_PAGES(MmMaximumNonPagedPoolInBytes - MmSizeOfNonPagedPoolInBytes);

    //
    // Remove 2 pages, since there's a guard page on top and on the bottom
    //
    MiExpansionPoolPagesInitialCharge -= 2;

    //
    // Now initialize the nonpaged pool expansion PTE space. Remember there's a
    // guard page on top so make sure to skip it. The bottom guard page will be
    // guaranteed by the fact our size is off by one.
    //
    MiInitializeSystemPtes(PointerPte + 1,
                           MiExpansionPoolPagesInitialCharge,
                           NonPagedPoolExpansion);
}
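
Each page of the initial pool is stamped with an Owner back-pointer so that a later free of any page can find the head of its run. A self-contained user-mode model of that stamping follows; the sizes and the signature value are made up for illustration.

#include <assert.h>
#include <stdint.h>
#include <stdlib.h>

#define PAGE_SIZE 4096u
#define POOL_SIGNATURE 0x50465242u /* arbitrary stand-in for MM_FREE_POOL_SIGNATURE */

typedef struct free_entry {
    struct free_entry *owner;  /* first page of the free run */
    uint32_t signature;
    size_t size_in_pages;      /* meaningful on the owner entry only */
} free_entry;

int main(void)
{
    size_t pool_pages = 8;
    void *pool = aligned_alloc(PAGE_SIZE, pool_pages * PAGE_SIZE);
    free_entry *first = pool;

    first->owner = first;
    first->signature = POOL_SIGNATURE;
    first->size_in_pages = pool_pages;

    /* Stamp every other page so any page can lead back to the run's head */
    for (size_t i = 1; i < pool_pages; i++) {
        free_entry *e = (free_entry *)((char *)pool + i * PAGE_SIZE);
        e->owner = first;
        e->signature = POOL_SIGNATURE;
    }

    /* From an arbitrary interior page, recover the whole run */
    free_entry *mid = (free_entry *)((char *)pool + 5 * PAGE_SIZE);
    assert(mid->signature == POOL_SIGNATURE);
    assert(mid->owner == first && mid->owner->size_in_pages == pool_pages);

    free(pool);
    return 0;
}
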
Exemple #26
0
IO_ALLOCATION_ACTION
sndProgramDMA(
    IN    PDEVICE_OBJECT pDO,
    IN    PIRP pIrp,
    IN    PVOID pMRB,
    IN    PVOID Context
)
/*++

Routine Description:

    This routine is executed when an adapter channel is allocated
    for our DMA needs.

Arguments:

    pDO     - Device object
    pIrp    - IO request packet
    pMRB    -
    Context - Which buffer are we using


Return Value:

    Tell the system what to do with the adapter object

--*/
{
    PGLOBAL_DEVICE_INFO pGDI;
    int WhichBuffer;

    UNREFERENCED_PARAMETER(pIrp);

    WhichBuffer = (int) Context;

    pGDI = ((PLOCAL_DEVICE_INFO)pDO->DeviceExtension)->pGlobalInfo;

    pGDI->pMRB[WhichBuffer] = pMRB;

    sndReStartDMA(pGDI, WhichBuffer);

    //
    // return a value that says we want to keep the channel
    // and map registers.
    //

    if (WhichBuffer == 0) {

        //
        // Do the other one.
        //


        if (pGDI->Usage == SoundInterruptUsageWaveIn) {

            dprintf4("Allocating adapter channel (buffer = 3)");
            IoAllocateAdapterChannel(pGDI->pAdapterObject[3],
                pGDI->pWaveInDevObj,
                BYTES_TO_PAGES(pGDI->DmaHalfBufferSize),
                sndProgramDMA,
                (PVOID)1);              // next buffer


        } else {

            dprintf4("Allocating adapter channel (buffer = 1)");
            IoAllocateAdapterChannel(pGDI->pAdapterObject[1],
                pGDI->pWaveOutDevObj,
                BYTES_TO_PAGES(pGDI->DmaHalfBufferSize),
                sndProgramDMA,
                (PVOID)1);              // next buffer
        }

        //
        // Execution will continue in sndProgramDMA when the
        // adapter has been allocated (AGAIN)
        //

    } else {

        //
        // Now program the hardware on the card to begin the transfer.
        // Note that this must be synchronized with the isr
        //

        dprintf4("Calling (sync) sndInitiate");
        KeSynchronizeExecution(pGDI->pInterrupt,
                               pGDI->StartDMA,
                               pGDI);

        //
        // Execution continues in the SoundInitiate routine
        //
    }

    return KeepObject;
}
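
The WhichBuffer == 0 branch re-arms the adapter for the second half-buffer, which is the classic ping-pong (double-buffered) DMA pattern. A tiny user-mode model of the alternation; fill and drain are hypothetical stand-ins for the DMA transfer and the consumer.

#include <stdio.h>

#define HALF_BUFFER 4096

static char dmaBuffer[2][HALF_BUFFER];

static void fill(char *half)  { half[0] = 1; } /* "hardware" writes one half */
static void drain(char *half) { (void)half; }  /* consumer reads the other */

int main(void)
{
    int which = 0;
    for (int i = 0; i < 8; i++) {
        fill(dmaBuffer[which]);      /* transfer into the active half */
        drain(dmaBuffer[which ^ 1]); /* consume the half finished last round */
        which ^= 1;                  /* swap roles, like the (PVOID)1 re-arm above */
        printf("iteration %d, active half is now %d\n", i, which);
    }
    return 0;
}
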
Exemple #27
0
/**
 * Initialize the CPU struct and start the VM by invoking VmxVirtualize().
 * Requires: a valid <GuestRsp>
 */
NTSTATUS NTAPI HvmSubvertCpu (
    PVOID GuestRsp
)
{ //Finish
    PCPU Cpu; //It will be used as the hypervisor struct.
    gvaddr_t HostKernelStackBase;
    NTSTATUS Status;
    gpaddr_t HostStackPA;
    ULONG i;

    Print(("HvmSubvertCpu(): Running on processor #%d\n", KeGetCurrentProcessorNumber()));

    // allocate memory for host stack, 16 * 4k
    HostKernelStackBase = MmAllocatePages(HOST_STACK_SIZE_IN_PAGES, &HostStackPA, TRUE);
    //HostKernelStackBase = MmAllocateContiguousPages(HOST_STACK_SIZE_IN_PAGES, &HostStackPA, TRUE);
    if (!HostKernelStackBase) 
    {
        Print(("HvmSubvertCpu(): Failed to allocate %d pages for the host stack\n", HOST_STACK_SIZE_IN_PAGES));
        return STATUS_INSUFFICIENT_RESOURCES;
    }

    // unchecked -8 or -4 ?
    Cpu = (PCPU) ((PCHAR) HostKernelStackBase + HOST_STACK_SIZE_IN_PAGES * PAGE_SIZE - 4 - sizeof (CPU));
    Cpu->HostStack = HostKernelStackBase;

    // for interrupt handlers which will address CPU through the FS
    Cpu->SelfPointer = Cpu;

    Cpu->ProcessorNumber = KeGetCurrentProcessorNumber();

   // Cpu->Nested = FALSE;

   // InitializeListHead (&Cpu->GeneralTrapsList);
   // InitializeListHead (&Cpu->MsrTrapsList);
   // InitializeListHead (&Cpu->IoTrapsList);
    for(i = 0; i < VMX_EXITS_NUM; i++)
        InitializeListHead (&Cpu->TrapsList[i]);

    Cpu->GdtArea = (PSEGMENT_DESCRIPTOR)MmAllocatePages (BYTES_TO_PAGES (BP_GDT_LIMIT),
                                                         NULL, TRUE); //Currently we create our own GDT and IDT area
    if (!Cpu->GdtArea) 
    {
        Print(("HvmSubvertCpu(): Failed to allocate memory for GDT\n"));
        return STATUS_INSUFFICIENT_RESOURCES;
    }

    Cpu->IdtArea = MmAllocatePages (BYTES_TO_PAGES (BP_IDT_LIMIT), NULL, TRUE);
    if (!Cpu->IdtArea) 
    {
        Print(("HvmSubvertCpu(): Failed to allocate memory for IDT\n"));
        return STATUS_INSUFFICIENT_RESOURCES;
    }

    Status = Hvm->ArchRegisterTraps(Cpu, arch);//<----------------3.1 Finish
    if (!NT_SUCCESS (Status)) 
    {
		Print(("HvmSubvertCpu(): Failed to register NewBluePill traps, status 0x%08hX\n", Status));
        return STATUS_UNSUCCESSFUL;
    }

    Status = Hvm->ArchInitialize (Cpu, CmSlipIntoMatrix, GuestRsp);//<----------------3.2 Finish
    if (!NT_SUCCESS (Status)) 
    {
        Print(("HvmSubvertCpu(): ArchInitialize() failed with status 0x%08hX\n", Status));
        return Status;
    }

    InterlockedIncrement (&g_uSubvertedCPUs);

    // no API calls allowed below this point: we have overloaded GDTR and selectors
    // unchecked
    _HvmSetupGdt (Cpu);//<----------------3.3 Finish
    _HvmSetupIdt (Cpu);//<----------------3.4 Finish

#if DEBUG_LEVEL > 1
    Print(("HvmSubvertCpu(): RFLAGS = %#x\n", RegGetRflags ()));
#endif

    Status = Hvm->ArchVirtualize(Cpu);//<----------------3.5 Finish

    // never reached
    InterlockedDecrement (&g_uSubvertedCPUs);
    return Status;
}
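
The "- 4" above hard-codes a 32-bit pointer slot at the top of the host stack (hence the "unchecked -8 or -4 ?" note). A small self-contained sketch of the same carve-out written with sizeof so it is pointer-size agnostic; the CPU struct here is a stand-in.

#include <stdio.h>

#define PAGE_SIZE 4096u
#define HOST_STACK_SIZE_IN_PAGES 16u

typedef struct { char regs[256]; } CPU; /* stand-in for the real CPU struct */

static unsigned char stack[HOST_STACK_SIZE_IN_PAGES * PAGE_SIZE];

int main(void)
{
    /* Carve the CPU struct from the top of the stack, leaving one
       pointer-sized slot above it instead of the hard-coded "- 4". */
    CPU *cpu = (CPU *)(stack + sizeof(stack) - sizeof(void *) - sizeof(CPU));

    printf("stack base %p, CPU struct at %p\n", (void *)stack, (void *)cpu);
    return 0;
}
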
Exemple #28
0
PTRANSFER_PACKET NewTransferPacket(PDEVICE_OBJECT Fdo)
{
    PFUNCTIONAL_DEVICE_EXTENSION fdoExt = Fdo->DeviceExtension;
    PCLASS_PRIVATE_FDO_DATA fdoData = fdoExt->PrivateFdoData;
    PTRANSFER_PACKET newPkt = NULL;
    ULONG transferLength;
    NTSTATUS status = STATUS_SUCCESS;

    if (NT_SUCCESS(status)) {
        status = RtlULongAdd(fdoData->HwMaxXferLen, PAGE_SIZE, &transferLength);
        if (!NT_SUCCESS(status)) {

            TracePrint((TRACE_LEVEL_ERROR, TRACE_FLAG_RW, "Integer overflow in calculating transfer packet size."));
            status = STATUS_INTEGER_OVERFLOW;
        }
    }

    /*
     *  Allocate the actual packet.
     */
    if (NT_SUCCESS(status)) {
        newPkt = ExAllocatePoolWithTag(NonPagedPool, sizeof(TRANSFER_PACKET), 'pnPC');
        if (newPkt == NULL) {
            TracePrint((TRACE_LEVEL_WARNING, TRACE_FLAG_RW, "Failed to allocate transfer packet."));
            status = STATUS_INSUFFICIENT_RESOURCES;
        }
        else {
            RtlZeroMemory(newPkt, sizeof(TRANSFER_PACKET));
        }
    }

    /*
     *  Allocate Irp for the packet.
     */
    if (NT_SUCCESS(status)) {
        newPkt->Irp = IoAllocateIrp(Fdo->StackSize, FALSE);
        if (newPkt->Irp == NULL) {
            TracePrint((TRACE_LEVEL_WARNING, TRACE_FLAG_RW, "Failed to allocate IRP for transfer packet."));
            status = STATUS_INSUFFICIENT_RESOURCES;
        }
    }

    /*
     * Allocate an MDL.  Add one page to the length to ensure an extra page
     * entry is allocated if the buffer does not start on a page boundary
     * (see the span-pages sketch after this function).
     */
    if (NT_SUCCESS(status)) {
        newPkt->PartialMdl = IoAllocateMdl(NULL,
                                           transferLength,
                                           FALSE,
                                           FALSE,
                                           NULL);
        if (newPkt->PartialMdl == NULL) {
            TracePrint((TRACE_LEVEL_WARNING, TRACE_FLAG_RW, "Failed to allocate MDL for transfer packet."));
            status = STATUS_INSUFFICIENT_RESOURCES;
        }
        else {
            ASSERT(newPkt->PartialMdl->Size >= (CSHORT)(sizeof(MDL) + BYTES_TO_PAGES(fdoData->HwMaxXferLen) * sizeof(PFN_NUMBER)));
        }

    }

    /*
     * Allocate per-packet retry history, if required
     */
    if (NT_SUCCESS(status) &&
        (fdoData->InterpretSenseInfo != NULL)
        ) {
        // attempt to allocate also the history
        ULONG historyByteCount = 0;

        // SAL annotation and ClassInitializeEx() should both catch this case
        ASSERT(fdoData->InterpretSenseInfo->HistoryCount != 0);
        __analysis_assume(fdoData->InterpretSenseInfo->HistoryCount != 0);

        historyByteCount = sizeof(SRB_HISTORY_ITEM) * fdoData->InterpretSenseInfo->HistoryCount;
        historyByteCount += sizeof(SRB_HISTORY) - sizeof(SRB_HISTORY_ITEM);

        newPkt->RetryHistory = (PSRB_HISTORY)ExAllocatePoolWithTag(NonPagedPool, historyByteCount, 'hrPC');

        if (newPkt->RetryHistory == NULL) {
            TracePrint((TRACE_LEVEL_WARNING, TRACE_FLAG_RW, "Failed to allocate retry history for transfer packet."));
            status = STATUS_INSUFFICIENT_RESOURCES;
        }
        else {
            // call this routine directly once since it's the first initialization of
            // the structure and the internal maximum count field is not yet setup.
            HistoryInitializeRetryLogs(newPkt->RetryHistory, fdoData->InterpretSenseInfo->HistoryCount);
        }
    }

    /*
     *  Enqueue the packet in our static AllTransferPacketsList
     *  (just so we can find it during debugging if it's stuck somewhere).
     */
    if (NT_SUCCESS(status))
    {
        KIRQL oldIrql;
        newPkt->Fdo = Fdo;
#if DBG
        newPkt->DbgPktId = InterlockedIncrement(&fdoData->DbgMaxPktId);
#endif
        KeAcquireSpinLock(&fdoData->SpinLock, &oldIrql);
        InsertTailList(&fdoData->AllTransferPacketsList, &newPkt->AllPktsListEntry);
        KeReleaseSpinLock(&fdoData->SpinLock, oldIrql);

    }
    else {
        // free any resources acquired above (in reverse order)
        if (newPkt != NULL)
        {
            FREE_POOL(newPkt->RetryHistory);
            if (newPkt->PartialMdl != NULL) { IoFreeMdl(newPkt->PartialMdl); }
            if (newPkt->Irp        != NULL) { IoFreeIrp(newPkt->Irp);        }
            FREE_POOL(newPkt);
        }
    }

    return newPkt;
}
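
The one-extra-page rule in the MDL allocation above comes straight from span-pages arithmetic: a misaligned buffer of N bytes can touch one more page than an aligned one. A self-contained check of that arithmetic, mirroring what ADDRESS_AND_SIZE_TO_SPAN_PAGES computes.

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

#define PAGE_SIZE 4096u

/* Pages spanned by [va, va + len): mirrors ADDRESS_AND_SIZE_TO_SPAN_PAGES */
static unsigned span_pages(uintptr_t va, size_t len)
{
    return (unsigned)(((va & (PAGE_SIZE - 1)) + len + PAGE_SIZE - 1) / PAGE_SIZE);
}

int main(void)
{
    assert(span_pages(0x1000, 4096) == 1);   /* page-aligned: exactly one page */
    assert(span_pages(0x1800, 4096) == 2);   /* misaligned: spills into a second page */
    assert(span_pages(0x1800, 65536) == 17); /* why the MDL gets one extra page */
    return 0;
}
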
Exemple #29
0
PVOID
NTAPI
MiAllocatePoolPages(IN POOL_TYPE PoolType,
                    IN SIZE_T SizeInBytes)
{
    PFN_NUMBER PageFrameNumber;
    PFN_COUNT SizeInPages, PageTableCount;
    ULONG i;
    KIRQL OldIrql;
    PLIST_ENTRY NextEntry, NextHead, LastHead;
    PMMPTE PointerPte, StartPte;
    PMMPDE PointerPde;
    ULONG EndAllocation;
    MMPTE TempPte;
    MMPDE TempPde;
    PMMPFN Pfn1;
    PVOID BaseVa, BaseVaStart;
    PMMFREE_POOL_ENTRY FreeEntry;
    PKSPIN_LOCK_QUEUE LockQueue;

    //
    // Figure out how big the allocation is in pages
    //
    SizeInPages = (PFN_COUNT)BYTES_TO_PAGES(SizeInBytes);

    //
    // Check for overflow
    //
    if (SizeInPages == 0)
    {
        //
        // Fail
        //
        return NULL;
    }

    //
    // Handle paged pool
    //
    if ((PoolType & BASE_POOL_TYPE_MASK) == PagedPool)
    {
        //
        // If only one page is being requested, try to grab it from the S-LIST
        //
        if ((SizeInPages == 1) && (ExQueryDepthSList(&MiPagedPoolSListHead)))
        {
            BaseVa = InterlockedPopEntrySList(&MiPagedPoolSListHead);
            if (BaseVa) return BaseVa;
        }

        //
        // Lock the paged pool mutex
        //
        KeAcquireGuardedMutex(&MmPagedPoolMutex);

        //
        // Find some empty allocation space
        //
        i = RtlFindClearBitsAndSet(MmPagedPoolInfo.PagedPoolAllocationMap,
                                   SizeInPages,
                                   MmPagedPoolInfo.PagedPoolHint);
        if (i == 0xFFFFFFFF)
        {
            //
            // Get the page bit count
            //
            i = ((SizeInPages - 1) / PTE_COUNT) + 1;
            DPRINT("Paged pool expansion: %lu %x\n", i, SizeInPages);

            //
            // Check if there is enough paged pool expansion space left
            //
            if (MmPagedPoolInfo.NextPdeForPagedPoolExpansion >
                    (PMMPDE)MiAddressToPte(MmPagedPoolInfo.LastPteForPagedPool))
            {
                //
                // Out of memory!
                //
                DPRINT1("OUT OF PAGED POOL!!!\n");
                KeReleaseGuardedMutex(&MmPagedPoolMutex);
                return NULL;
            }

            //
            // Check if we'll have to expand past the last PTE we have available
            //
            if (((i - 1) + MmPagedPoolInfo.NextPdeForPagedPoolExpansion) >
                    (PMMPDE)MiAddressToPte(MmPagedPoolInfo.LastPteForPagedPool))
            {
                //
                // We can only support this much then
                //
                PointerPde = MiAddressToPte(MmPagedPoolInfo.LastPteForPagedPool);
                PageTableCount = (PFN_COUNT)(PointerPde + 1 -
                                             MmPagedPoolInfo.NextPdeForPagedPoolExpansion);
                ASSERT(PageTableCount < i);
                i = PageTableCount;
            }
            else
            {
                //
                // Otherwise, there is plenty of space left for this expansion
                //
                PageTableCount = i;
            }

            //
            // Get the template PDE we'll use to expand
            //
            TempPde = ValidKernelPde;

            //
            // Get the first PTE in expansion space
            //
            PointerPde = MmPagedPoolInfo.NextPdeForPagedPoolExpansion;
            BaseVa = MiPdeToPte(PointerPde);
            BaseVaStart = BaseVa;

            //
            // Lock the PFN database and loop pages
            //
            OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
            do
            {
                //
                // It should not already be valid
                //
                ASSERT(PointerPde->u.Hard.Valid == 0);

                /* Request a page */
                MI_SET_USAGE(MI_USAGE_PAGED_POOL);
                MI_SET_PROCESS2("Kernel");
                PageFrameNumber = MiRemoveAnyPage(MI_GET_NEXT_COLOR());
                TempPde.u.Hard.PageFrameNumber = PageFrameNumber;
#if (_MI_PAGING_LEVELS >= 3)
                /* On PAE/x64 systems, there's no double-buffering */
                ASSERT(FALSE);
#else
                //
                // Save it into our double-buffered system page directory
                //
                MmSystemPagePtes[((ULONG_PTR)PointerPde & (SYSTEM_PD_SIZE - 1)) / sizeof(MMPTE)] = TempPde;

                /* Initialize the PFN */
                MiInitializePfnForOtherProcess(PageFrameNumber,
                                               (PMMPTE)PointerPde,
                                               MmSystemPageDirectory[(PointerPde - MiAddressToPde(NULL)) / PDE_COUNT]);

                /* Write the actual PDE now */
//                MI_WRITE_VALID_PDE(PointerPde, TempPde);
#endif
                //
                // Move on to the next expansion address
                //
                PointerPde++;
                BaseVa = (PVOID)((ULONG_PTR)BaseVa + PAGE_SIZE);
                i--;
            } while (i > 0);

            //
            // Release the PFN database lock
            //
            KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);

            //
            // These pages are now available, clear their availability bits
            //
            EndAllocation = (ULONG)(MmPagedPoolInfo.NextPdeForPagedPoolExpansion -
                                    (PMMPDE)MiAddressToPte(MmPagedPoolInfo.FirstPteForPagedPool)) *
                            PTE_COUNT;
            RtlClearBits(MmPagedPoolInfo.PagedPoolAllocationMap,
                         EndAllocation,
                         PageTableCount * PTE_COUNT);

            //
            // Update the next expansion location
            //
            MmPagedPoolInfo.NextPdeForPagedPoolExpansion += PageTableCount;

            //
            // Zero out the newly available memory
            //
            RtlZeroMemory(BaseVaStart, PageTableCount * PAGE_SIZE);

            //
            // Now try consuming the pages again
            //
            i = RtlFindClearBitsAndSet(MmPagedPoolInfo.PagedPoolAllocationMap,
                                       SizeInPages,
                                       0);
            if (i == 0xFFFFFFFF)
            {
                //
                // Out of memory!
                //
                DPRINT1("OUT OF PAGED POOL!!!\n");
                KeReleaseGuardedMutex(&MmPagedPoolMutex);
                return NULL;
            }
        }

        //
        // Update the pool hint if the request was just one page
        //
        if (SizeInPages == 1) MmPagedPoolInfo.PagedPoolHint = i + 1;

        //
        // Update the end bitmap so we know the bounds of this allocation when
        // the time comes to free it
        //
        EndAllocation = i + SizeInPages - 1;
        RtlSetBit(MmPagedPoolInfo.EndOfPagedPoolBitmap, EndAllocation);

        //
        // Now we can release the lock (it mainly protects the bitmap)
        //
        KeReleaseGuardedMutex(&MmPagedPoolMutex);

        //
        // Now figure out where this allocation starts
        //
        BaseVa = (PVOID)((ULONG_PTR)MmPagedPoolStart + (i << PAGE_SHIFT));

        //
        // Flush the TLB
        //
        KeFlushEntireTb(TRUE, TRUE);

        /* Setup a demand-zero writable PTE */
        MI_MAKE_SOFTWARE_PTE(&TempPte, MM_READWRITE);

        //
        // Find the first and last PTE, then loop them all
        //
        PointerPte = MiAddressToPte(BaseVa);
        StartPte = PointerPte + SizeInPages;
        do
        {
            //
            // Write the demand zero PTE and keep going
            //
            MI_WRITE_INVALID_PTE(PointerPte, TempPte);
        } while (++PointerPte < StartPte);

        //
        // Return the allocation address to the caller
        //
        return BaseVa;
    }

    //
    // If only one page is being requested, try to grab it from the S-LIST
    //
    if ((SizeInPages == 1) && (ExQueryDepthSList(&MiNonPagedPoolSListHead)))
    {
        BaseVa = InterlockedPopEntrySList(&MiNonPagedPoolSListHead);
        if (BaseVa) return BaseVa;
    }

    //
    // Allocations of less than 4 pages go into their individual buckets
    //
    i = SizeInPages - 1;
    if (i >= MI_MAX_FREE_PAGE_LISTS) i = MI_MAX_FREE_PAGE_LISTS - 1;

    //
    // Loop through all the free page lists based on the page index
    //
    NextHead = &MmNonPagedPoolFreeListHead[i];
    LastHead = &MmNonPagedPoolFreeListHead[MI_MAX_FREE_PAGE_LISTS];

    //
    // Acquire the nonpaged pool lock
    //
    OldIrql = KeAcquireQueuedSpinLock(LockQueueMmNonPagedPoolLock);
    do
    {
        //
        // Now loop through all the free page entries in this given list
        //
        NextEntry = NextHead->Flink;
        while (NextEntry != NextHead)
        {
            /* Is freed non paged pool enabled */
            if (MmProtectFreedNonPagedPool)
            {
                /* We need to be able to touch this page, unprotect it */
                MiUnProtectFreeNonPagedPool(NextEntry, 0);
            }

            //
            // Grab the entry and see if it can handle our allocation
            //
            FreeEntry = CONTAINING_RECORD(NextEntry, MMFREE_POOL_ENTRY, List);
            ASSERT(FreeEntry->Signature == MM_FREE_POOL_SIGNATURE);
            if (FreeEntry->Size >= SizeInPages)
            {
                //
                // It does, so consume the pages from here
                //
                FreeEntry->Size -= SizeInPages;

                //
                // The allocation will begin in this free page area
                //
                BaseVa = (PVOID)((ULONG_PTR)FreeEntry +
                                 (FreeEntry->Size  << PAGE_SHIFT));

                /* Remove the item from the list, depending if pool is protected */
                if (MmProtectFreedNonPagedPool)
                    MiProtectedPoolRemoveEntryList(&FreeEntry->List);
                else
                    RemoveEntryList(&FreeEntry->List);

                //
                // However, check if it's still got space left
                //
                if (FreeEntry->Size != 0)
                {
                    /* Check which list to insert this entry into */
                    i = FreeEntry->Size - 1;
                    if (i >= MI_MAX_FREE_PAGE_LISTS) i = MI_MAX_FREE_PAGE_LISTS - 1;

                    /* Insert the entry into the free list head, check for prot. pool */
                    if (MmProtectFreedNonPagedPool)
                        MiProtectedPoolInsertList(&MmNonPagedPoolFreeListHead[i], &FreeEntry->List, TRUE);
                    else
                        InsertTailList(&MmNonPagedPoolFreeListHead[i], &FreeEntry->List);

                    /* Is freed non paged pool protected? */
                    if (MmProtectFreedNonPagedPool)
                    {
                        /* Protect the freed pool! */
                        MiProtectFreeNonPagedPool(FreeEntry, FreeEntry->Size);
                    }
                }

                //
                // Grab the PTE for this allocation
                //
                PointerPte = MiAddressToPte(BaseVa);
                ASSERT(PointerPte->u.Hard.Valid == 1);

                //
                // Grab the PFN NextEntry and index
                //
                Pfn1 = MiGetPfnEntry(PFN_FROM_PTE(PointerPte));

                //
                // Now mark it as the beginning of an allocation
                //
                ASSERT(Pfn1->u3.e1.StartOfAllocation == 0);
                Pfn1->u3.e1.StartOfAllocation = 1;

                /* Mark it as special pool if needed */
                ASSERT(Pfn1->u4.VerifierAllocation == 0);
                if (PoolType & VERIFIER_POOL_MASK)
                {
                    Pfn1->u4.VerifierAllocation = 1;
                }

                //
                // Check if the allocation is larger than one page
                //
                if (SizeInPages != 1)
                {
                    //
                    // Navigate to the last PFN entry and PTE
                    //
                    PointerPte += SizeInPages - 1;
                    ASSERT(PointerPte->u.Hard.Valid == 1);
                    Pfn1 = MiGetPfnEntry(PointerPte->u.Hard.PageFrameNumber);
                }

                //
                // Mark this PFN as the last (might be the same as the first)
                //
                ASSERT(Pfn1->u3.e1.EndOfAllocation == 0);
                Pfn1->u3.e1.EndOfAllocation = 1;

                //
                // Release the nonpaged pool lock, and return the allocation
                //
                KeReleaseQueuedSpinLock(LockQueueMmNonPagedPoolLock, OldIrql);
                return BaseVa;
            }

            //
            // Try the next free page entry
            //
            NextEntry = FreeEntry->List.Flink;

            /* Is freed non paged pool protected? */
            if (MmProtectFreedNonPagedPool)
            {
                /* Protect the freed pool! */
                MiProtectFreeNonPagedPool(FreeEntry, FreeEntry->Size);
            }
        }
    } while (++NextHead < LastHead);

    //
    // If we got here, we're out of space.
    // Start by releasing the lock
    //
    KeReleaseQueuedSpinLock(LockQueueMmNonPagedPoolLock, OldIrql);

    //
    // Allocate some system PTEs
    //
    StartPte = MiReserveSystemPtes(SizeInPages, NonPagedPoolExpansion);
    PointerPte = StartPte;
    if (StartPte == NULL)
    {
        //
        // Ran out of memory
        //
        DPRINT1("Out of NP Expansion Pool\n");
        return NULL;
    }

    //
    // Acquire the pool lock now
    //
    OldIrql = KeAcquireQueuedSpinLock(LockQueueMmNonPagedPoolLock);

    //
    // Lock the PFN database too
    //
    LockQueue = &KeGetCurrentPrcb()->LockQueue[LockQueuePfnLock];
    KeAcquireQueuedSpinLockAtDpcLevel(LockQueue);

    //
    // Loop the pages
    //
    TempPte = ValidKernelPte;
    do
    {
        /* Allocate a page */
        MI_SET_USAGE(MI_USAGE_PAGED_POOL);
        MI_SET_PROCESS2("Kernel");
        PageFrameNumber = MiRemoveAnyPage(MI_GET_NEXT_COLOR());

        /* Get the PFN entry for it and fill it out */
        Pfn1 = MiGetPfnEntry(PageFrameNumber);
        Pfn1->u3.e2.ReferenceCount = 1;
        Pfn1->u2.ShareCount = 1;
        Pfn1->PteAddress = PointerPte;
        Pfn1->u3.e1.PageLocation = ActiveAndValid;
        Pfn1->u4.VerifierAllocation = 0;

        /* Write the PTE for it */
        TempPte.u.Hard.PageFrameNumber = PageFrameNumber;
        MI_WRITE_VALID_PTE(PointerPte++, TempPte);
    } while (--SizeInPages > 0);

    //
    // This is the last page
    //
    Pfn1->u3.e1.EndOfAllocation = 1;

    //
    // Get the first page and mark it as such
    //
    Pfn1 = MiGetPfnEntry(StartPte->u.Hard.PageFrameNumber);
    Pfn1->u3.e1.StartOfAllocation = 1;

    /* Mark it as a verifier allocation if needed */
    ASSERT(Pfn1->u4.VerifierAllocation == 0);
    if (PoolType & VERIFIER_POOL_MASK) Pfn1->u4.VerifierAllocation = 1;

    //
    // Release the PFN and nonpaged pool lock
    //
    KeReleaseQueuedSpinLockFromDpcLevel(LockQueue);
    KeReleaseQueuedSpinLock(LockQueueMmNonPagedPoolLock, OldIrql);

    //
    // Return the address
    //
    return MiPteToAddress(StartPte);
}
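
Note how the nonpaged path above consumes a free run from its tail: the entry's Size is decremented and the allocation starts Size pages past the header, so the MMFREE_POOL_ENTRY itself never moves. A user-mode model of that tail carve, with made-up sizes.

#include <assert.h>
#include <stddef.h>
#include <stdlib.h>

#define PAGE_SIZE 4096u

typedef struct {
    size_t size_in_pages; /* pages remaining in this free run */
} free_run;

/* Carve `want` pages off the tail of `run`; returns the carved block's address */
static void *carve_tail(free_run *run, size_t want)
{
    assert(run->size_in_pages >= want);
    run->size_in_pages -= want;
    /* The allocation begins after the pages still owned by the run */
    return (char *)run + run->size_in_pages * PAGE_SIZE;
}

int main(void)
{
    void *pool = aligned_alloc(PAGE_SIZE, 8 * PAGE_SIZE);
    free_run *run = pool;
    run->size_in_pages = 8;

    void *block = carve_tail(run, 3); /* take 3 pages from the tail */
    assert(block == (char *)pool + 5 * PAGE_SIZE);
    assert(run->size_in_pages == 5); /* header untouched, size shrunk */

    free(pool);
    return 0;
}
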
Exemple #30
0
PVOID
NTAPI
MiAllocateContiguousMemory(IN SIZE_T NumberOfBytes,
                           IN PFN_NUMBER LowestAcceptablePfn,
                           IN PFN_NUMBER HighestAcceptablePfn,
                           IN PFN_NUMBER BoundaryPfn,
                           IN MEMORY_CACHING_TYPE CacheType)
{
    PVOID BaseAddress;
    PFN_NUMBER SizeInPages;
    MI_PFN_CACHE_ATTRIBUTE CacheAttribute;

    //
    // Verify count and cache type
    //
    ASSERT(NumberOfBytes != 0);
    ASSERT(CacheType <= MmWriteCombined);

    //
    // Compute size requested
    //
    SizeInPages = BYTES_TO_PAGES(NumberOfBytes);

    //
    // Convert the cache attribute and check for cached requests
    //
    CacheAttribute = MiPlatformCacheAttributes[FALSE][CacheType];
    if (CacheAttribute == MiCached)
    {
        //
        // Because initial nonpaged pool is supposed to be contiguous, go ahead
        // and try making a nonpaged pool allocation first.
        //
        BaseAddress = ExAllocatePoolWithTag(NonPagedPoolCacheAligned,
                                            NumberOfBytes,
                                            'mCmM');
        if (BaseAddress)
        {
            //
            // Now make sure it's actually contiguous (if it came from expansion
            // it might not be).
            //
            if (MiCheckForContiguousMemory(BaseAddress,
                                           SizeInPages,
                                           SizeInPages,
                                           LowestAcceptablePfn,
                                           HighestAcceptablePfn,
                                           BoundaryPfn,
                                           CacheAttribute))
            {
                //
                // Sweet, we're in business!
                //
                return BaseAddress;
            }

            //
            // No such luck
            //
            ExFreePoolWithTag(BaseAddress, 'mCmM');
        }
    }

    //
    // According to MSDN, the system won't try anything else if you're higher
    // than APC level.
    //
    if (KeGetCurrentIrql() > APC_LEVEL) return NULL;

    //
    // Otherwise, we'll go try to find some
    //
    return MiFindContiguousMemory(LowestAcceptablePfn,
                                  HighestAcceptablePfn,
                                  BoundaryPfn,
                                  SizeInPages,
                                  CacheType);
}
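
A note on BoundaryPfn, which is threaded through unchanged above: the usual contract (as with MmAllocateContiguousMemorySpecifyCache's boundary multiple) is that the run must not cross a physical boundary that is a multiple of BoundaryPfn frames, with 0 meaning unrestricted. A small self-contained check written under that assumption.

#include <assert.h>
#include <stdint.h>

/* Assumed semantics: a run of `count` frames starting at `first` may not
   cross a multiple-of-`boundary` frame boundary; boundary 0 = unrestricted. */
static int crosses_boundary(uint64_t first, uint64_t count, uint64_t boundary)
{
    if (boundary == 0 || count == 0)
        return 0;
    return (first / boundary) != ((first + count - 1) / boundary);
}

int main(void)
{
    assert(!crosses_boundary(0x10, 0x10, 0x40)); /* fits inside one 64-frame window */
    assert( crosses_boundary(0x3C, 0x08, 0x40)); /* 0x3C..0x43 straddles frame 0x40 */
    assert(!crosses_boundary(0x123, 0x200, 0));  /* no boundary requested */
    return 0;
}
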