Example #1
File: Linux.c Project: Cutty/edk2
EFI_STATUS
EFIAPI
LoadLinux (
  IN VOID      *Kernel,
  IN OUT VOID  *KernelSetup
  )
{
  EFI_STATUS          Status;
  struct boot_params  *Bp;

  Status = BasicKernelSetupCheck (KernelSetup);
  if (EFI_ERROR (Status)) {
    return Status;
  }

  Bp = (struct boot_params *) KernelSetup;

  if (Bp->hdr.version < 0x205 || !Bp->hdr.relocatable_kernel) {
    //
    // We only support relocatable kernels
    //
    return EFI_UNSUPPORTED;
  }

  InitLinuxDescriptorTables ();

  Bp->hdr.code32_start = (UINT32)(UINTN) Kernel;
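  //
  // Per the Linux x86 boot protocol, boot protocol 2.12 (0x20c) added the
  // xloadflags field; XLF_EFI_HANDOVER_32 (BIT2) and XLF_EFI_HANDOVER_64
  // (BIT3) advertise the EFI handover entry point located at handover_offset.
  //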
  if (Bp->hdr.version >= 0x20c && Bp->hdr.handover_offset &&
      (Bp->hdr.xloadflags & (sizeof (UINTN) == 4 ? BIT2 : BIT3))) {
    DEBUG ((EFI_D_INFO, "Jumping to kernel EFI handover point at ofs %x\n", Bp->hdr.handover_offset));

    DisableInterrupts ();
    JumpToUefiKernel ((VOID*) gImageHandle, (VOID*) gST, KernelSetup, Kernel);
  }

  //
  // Old kernels without EFI handover protocol
  //
  SetupLinuxBootParams (KernelSetup);

  DEBUG ((EFI_D_INFO, "Jumping to kernel\n"));
  DisableInterrupts ();
  SetLinuxDescriptorTables ();
  JumpToKernel (Kernel, (VOID*) KernelSetup);

  return EFI_SUCCESS;
}
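
For reference, a minimal standalone sketch of the handover decision made above, assuming only the field names and XLF_* bits defined by the Linux x86 boot protocol (Documentation/x86/boot); the helper name and plain integer parameters are illustrative and not part of edk2:

#include <stdbool.h>
#include <stdint.h>

/* xloadflags bits from the Linux x86 boot protocol. */
#define XLF_EFI_HANDOVER_32  (1u << 2)
#define XLF_EFI_HANDOVER_64  (1u << 3)

/* Illustrative only: true when the EFI handover entry point may be used
   (boot protocol 2.12+, non-zero handover_offset, matching XLF bit set). */
static bool
CanUseEfiHandover (uint16_t Version, uint16_t XLoadFlags,
                   uint32_t HandoverOffset, bool Is64Bit)
{
  if (Version < 0x20c || HandoverOffset == 0) {
    return false;
  }
  return (XLoadFlags & (Is64Bit ? XLF_EFI_HANDOVER_64 : XLF_EFI_HANDOVER_32)) != 0;
}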
Example #2
int Loader32Main(uint16_t* InfoTableAddress, DAP* const DAPKernel64Address, const void* const LoadModuleAddress)
{
	InfoTable = InfoTableAddress;
	ClearScreen();
	ProcessMMAPEntries();

	// Get the max physical address and max linear address that can be handled by the CPU.
	// These details are found using the CPUID.EAX=0x80000008 instruction, which has to be executed from assembly.
	// Refer to Intel documentation Vol. 3 Section 4.1.4
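	// A rough sketch of what those helpers presumably do (assumed, not the actual assembly):
	//   execute CPUID with EAX = 0x80000008; on return, EAX[7:0] is the number of physical
	//   address bits and EAX[15:8] is the number of linear address bits, e.g.
	//     mov eax, 0x80000008
	//     cpuid                ; maxphyaddr = AL, maxlinaddr = AH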
	uint8_t maxphyaddr = GetPhysicalAddressLimit();
	uint8_t maxlinaddr = GetLinearAddressLimit();
	*(InfoTable + 9) = (uint16_t)maxphyaddr;
	*(InfoTable + 10) = (uint16_t)maxlinaddr;

	DAP DAPKernel64 = *DAPKernel64Address;
	uint16_t KernelNumberOfSectors = DAPKernel64.NumberOfSectors;
	uint32_t bytesOfKernelELF = (uint32_t)0x800 * (uint32_t)KernelNumberOfSectors;
	uint64_t KernelVirtualMemSize = *((uint64_t*)(InfoTable + 0xc));	// Get size of kernel in virtual memory

	// Check if enough space is available to load the kernel as well as the ELF (i.e. length of region > KernelVirtualMemSize + bytesOfKernelELF)
	// The parsed kernel code will be loaded at 2 MiB physical memory (size : KernelVirtualMemSize)
	// The kernel ELF will be loaded at 2 MiB + KernelVirtualMemSize + 4 KiB physical memory, from where it will be parsed
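	// In other words, a usable region must start at or below 1 MiB and extend past
	// 0x201000 + KernelVirtualMemSize + bytesOfKernelELF, which is exactly the end of
	// the kernel ELF staging area (KernelELFBase + bytesOfKernelELF) computed below.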
	bool enoughSpace = false;
	size_t numberMMAPentries = GetNumberOfMMAPEntries();
	struct ACPI3Entry* mmap = GetMMAPBase();
	for(size_t i=0; i<numberMMAPentries; i++)
	{
		if((mmap[i].BaseAddress <= (uint64_t)0x100000) && (mmap[i].Length > ((uint64_t)0x201000 - mmap[i].BaseAddress + (uint64_t)bytesOfKernelELF + KernelVirtualMemSize)))
		{
			enoughSpace = true;
			break;
		}
	}
	if(!enoughSpace)
	{
		ClearScreen();
		PrintString("\nFatal Error : System memory is fragmented too much.\nNot enough space to load kernel.\nCannot boot!");
		return 1;
	}
	PrintString("Loading kernel...\n");

	// We will be identity mapping the first 16 MiB of the physical memory
	// To see how the mapping is done refer to docs/mapping.txt
	IdentityMapFirst16MiB();

	// In the GDT, change the base addresses of the 16-bit segments
	Setup16BitSegments(InfoTableAddress, LoadModuleAddress, DAPKernel64Address, *InfoTable); // InfoTable[0] = boot disk number

	// Enter the kernel physical memory base address in the info table
	uint32_t KernelBase = 0x200000;
	*((uint64_t*)(InfoTable + 0x10)) = (uint64_t)KernelBase;

	// We have 64 KiB free in physical memory from 0x80000 to 0x90000. Each sector in our OS ISO image is 2 KiB in size,
	// so we can load the kernel ELF in batches of 32 sectors. Leave a gap of 4 KiB between the kernel and the kernel ELF.
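	// Each batch is therefore 32 sectors * 0x800 bytes = 0x10000 bytes (64 KiB), read into the
	// real-mode buffer at segment:offset 0x8000:0x0000 (linear 0x80000) and then copied upwards.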
	uint32_t KernelELFBase = 0x201000 + (uint32_t)KernelVirtualMemSize;
	DAPKernel64Address->offset = 0x0;
	DAPKernel64Address->segment = 0x8000;
	DAPKernel64Address->NumberOfSectors = 32;
	uint16_t iters = KernelNumberOfSectors / 32;
	memset((void*)0x80000, 0, 0x10000);
	for(uint16_t i=0; i<iters; i++)
	{
		LoadKernelELFSectors();
		memcopy((void*)0x80000, (void*)(KernelELFBase + i*0x10000), 0x10000);
		DAPKernel64Address->FirstSector += 32;
	}
	// Load remaining sectors
	DAPKernel64Address->NumberOfSectors = KernelNumberOfSectors % 32;
	LoadKernelELFSectors();
	memcopy((void*)0x80000, (void*)(KernelELFBase + iters*0x10000), DAPKernel64Address->NumberOfSectors * 0x800);

	PrintString("Kernel executable loaded.\n");

	// Parse the kernel executable
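	// ELF64 header fields referenced below (offsets per the ELF-64 object file format):
	//   byte 4 is EI_CLASS (2 = ELFCLASS64) and byte 5 is EI_DATA (1 = ELFDATA2LSB), so reading
	//   them as one little-endian uint16_t at offset 4 must yield 0x0102;
	//   offset 32 is e_phoff (low 32 bits), 54 is e_phentsize, and 56 is e_phnum.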
	uint16_t ELFFlags = *((uint16_t*)(KernelELFBase + 4));
	if(ELFFlags != 0x0102)
	{
		PrintString("\nKernel executable corrupted! Cannot boot!");
		return 1;
	}
	uint32_t ProgramHeaderTable = *((uint32_t*)(KernelELFBase + 32));
	uint16_t ProgramHeaderEntrySize = *((uint16_t*)(KernelELFBase + 54));
	uint16_t ProgramHeaderEntries = *((uint16_t*)(KernelELFBase + 56));
	if(ProgramHeaderEntrySize != sizeof(ELF64ProgramHeader))
	{
		PrintString("\nKernel executable corrupted! Cannot boot!");
		return 1;
	}
	ELF64ProgramHeader *ProgramHeader = (ELF64ProgramHeader*)(KernelELFBase + ProgramHeaderTable);
	uint32_t MemorySeekp = KernelBase;
	PML4E* PML4T = (PML4E*)0x110000;
	uint32_t NewPageStart = 0x11b000; // New paging-structure pages are allocated starting at this address; it is advanced by 0x1000 for each new page.
	for(uint16_t i=0; i<ProgramHeaderEntries; i++)
	{
		uint32_t SizeInMemory = (uint32_t)ProgramHeader[i].SegmentSizeInMemory;

		memset((void*)MemorySeekp, 0, SizeInMemory);
		memcopy((void*)(KernelELFBase + (uint32_t)ProgramHeader[i].FileOffset), (void*)MemorySeekp, (uint32_t)ProgramHeader[i].SegmentSizeInFile);

		// Our kernel is linked at higher-half addresses (currently the last 2 GiB of the 64-bit address space; this may change if the linker script changes).
		// Map this segment in the paging structure
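		// 4-level x86-64 paging splits a canonical virtual address as
		//   bits 47:39 -> PML4 index, 38:30 -> PDPT index, 29:21 -> PD index, 20:12 -> PT index,
		//   bits 11:0  -> offset within the 4 KiB page,
		// which is what the shift-by-12-then-9 sequence with the 0x1ff masks below extracts.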
		size_t NumberOfPages = SizeInMemory/0x1000;
		if(SizeInMemory%0x1000)
		{
			NumberOfPages++;
		}
		for(size_t j = 0; j<NumberOfPages; j++, MemorySeekp += 0x1000)
		{
			uint64_t VirtualMemoryAddress = ProgramHeader[i].VirtualMemoryAddress + j*0x1000;
			VirtualMemoryAddress >>= 12;
			uint16_t PTIndex = VirtualMemoryAddress & 0x1ff;
			VirtualMemoryAddress >>= 9;
			uint16_t PDIndex = VirtualMemoryAddress & 0x1ff;
			VirtualMemoryAddress >>= 9;
			uint16_t PDPTIndex = VirtualMemoryAddress & 0x1ff;
			VirtualMemoryAddress >>= 9;
			uint16_t PML4TIndex = VirtualMemoryAddress & 0x1ff;
			if(PML4T[PML4TIndex].Present)
			{
				PDPTE* PDPT = (PDPTE*)((uint32_t)PML4T[PML4TIndex].PageAddress<<12);
				if(PDPT[PDPTIndex].Present)
				{
					PDE* PD = (PDE*)((uint32_t)PDPT[PDPTIndex].PageAddress<<12);
					if(PD[PDIndex].Present)
					{
						PTE* PT = (PTE*)((uint32_t)PD[PDIndex].PageAddress<<12);
						if(!PT[PTIndex].Present)
						{
							AllocatePagingEntry(&(PT[PTIndex]), MemorySeekp);
						}
					}
					else
					{
						AllocatePagingEntry(&(PD[PDIndex]), NewPageStart);
						PTE* PT = (PTE*)NewPageStart;
						NewPageStart += 0x1000;
						AllocatePagingEntry(&(PT[PTIndex]), MemorySeekp);
					}
				}
				else
				{
					AllocatePagingEntry(&(PDPT[PDPTIndex]), NewPageStart);
					PDE* PD = (PDE*)NewPageStart;
					NewPageStart += 0x1000;
					AllocatePagingEntry(&(PD[PDIndex]), NewPageStart);
					PTE* PT = (PTE*)NewPageStart;
					NewPageStart += 0x1000;
					AllocatePagingEntry(&(PT[PTIndex]), MemorySeekp);
				}
			}
			else
			{
				AllocatePagingEntry(&(PML4T[PML4TIndex]), NewPageStart);
				PDPTE* PDPT = (PDPTE*)NewPageStart;
				NewPageStart += 0x1000;
				AllocatePagingEntry(&(PDPT[PDPTIndex]), NewPageStart);
				PDE* PD = (PDE*)NewPageStart;
				NewPageStart += 0x1000;
				AllocatePagingEntry(&(PD[PDIndex]), NewPageStart);
				PTE* PT = (PTE*)NewPageStart;
				NewPageStart += 0x1000;
				AllocatePagingEntry(&(PT[PTIndex]), MemorySeekp);
			}
		}
	}

	JumpToKernel(PML4T, InfoTableAddress);	// Jump to kernel. Code beyond this should never get executed.
	ClearScreen();
	PrintString("Fatal error : Cannot boot!");
	return 1;
}
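
As a side note, the index extraction in the paging loop above can be exercised on its own; the sketch below is illustrative only (plain uint64_t arithmetic and a hypothetical helper name, not the project's PML4E/PDPTE/PDE/PTE types or AllocatePagingEntry):

#include <stdint.h>
#include <stdio.h>

// Illustrative: split a canonical virtual address into the four x86-64
// page-table indices and the page offset (same math as the loop above).
static void SplitVirtualAddress(uint64_t va, uint16_t index[4], uint16_t* pageOffset)
{
	*pageOffset = (uint16_t)(va & 0xfff);
	index[0] = (uint16_t)((va >> 12) & 0x1ff);	// PT index
	index[1] = (uint16_t)((va >> 21) & 0x1ff);	// PD index
	index[2] = (uint16_t)((va >> 30) & 0x1ff);	// PDPT index
	index[3] = (uint16_t)((va >> 39) & 0x1ff);	// PML4 index
}

int main(void)
{
	uint16_t index[4], offset;
	SplitVirtualAddress(0xffffffff80100000ULL, index, &offset);	// a typical higher-half kernel address
	printf("PML4=%u PDPT=%u PD=%u PT=%u offset=%u\n",
	       index[3], index[2], index[1], index[0], offset);
	return 0;
}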