/*
 * Stack space used when we detect a bad kernel stack pointer, and
 * early in SMP boots before relocation is enabled.
 */
static void __init emergency_stack_init(void)
{
	u64 limit;
	unsigned int i;

	/*
	 * Emergency stacks must be under 256MB, we cannot afford to take
	 * SLB misses on them. The ABI also requires them to be 128-byte
	 * aligned.
	 *
	 * Since we use these as temporary stacks during secondary CPU
	 * bringup, we need to get at them in real mode. This means they
	 * must also be within the RMO region.
	 */
	limit = min(slb0_limit(), memblock.rmo_size);

	for_each_possible_cpu(i) {
		unsigned long sp;
		sp = memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, limit);
		sp += THREAD_SIZE;
		paca[i].emergency_sp = __va(sp);
	}
}
static int alpha_core_agp_vm_fault(struct vm_area_struct *vma,
				   struct vm_fault *vmf)
{
	alpha_agp_info *agp = agp_bridge->dev_private_data;
	dma_addr_t dma_addr;
	unsigned long pa;
	struct page *page;

	dma_addr = (unsigned long)vmf->virtual_address - vma->vm_start
		   + agp->aperture.bus_base;
	pa = agp->ops->translate(agp, dma_addr);

	if (pa == (unsigned long)-EINVAL)
		return VM_FAULT_SIGBUS;	/* no translation */

	/*
	 * Get the page, inc the use count, and return it
	 */
	page = virt_to_page(__va(pa));
	get_page(page);
	vmf->page = page;
	return 0;
}
static int __init
acpi_parse_fadt(unsigned long phys_addr, unsigned long size)
{
	struct acpi_table_header *fadt_header;
	struct fadt_descriptor *fadt;

	if (!phys_addr || !size)
		return -EINVAL;

	fadt_header = (struct acpi_table_header *)__va(phys_addr);
	if (fadt_header->revision != 3)
		return -ENODEV;	/* Only deal with ACPI 2.0 FADT */

	fadt = (struct fadt_descriptor *)fadt_header;

	if (!(fadt->iapc_boot_arch & BAF_8042_KEYBOARD_CONTROLLER))
		acpi_kbd_controller_present = 0;

	if (fadt->iapc_boot_arch & BAF_LEGACY_DEVICES)
		acpi_legacy_devices = 1;

	acpi_register_gsi(fadt->sci_int, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW);
	return 0;
}
int arch_remove_memory(u64 start, u64 size)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	struct zone *zone;
	int ret;

	zone = page_zone(pfn_to_page(start_pfn));
	ret = __remove_pages(zone, start_pfn, nr_pages);
	if (ret)
		return ret;

	/* Remove htab bolted mappings for this section of memory */
	start = (unsigned long)__va(start);
	ret = remove_section_mapping(start, start + size);

	/* Ensure all vmalloc mappings are flushed in case they also
	 * hit that section of memory
	 */
	vm_unmap_aliases();

	return ret;
}
static struct page *alpha_core_agp_vm_nopage(struct vm_area_struct *vma,
					     unsigned long address,
					     int *type)
{
	alpha_agp_info *agp = agp_bridge->dev_private_data;
	dma_addr_t dma_addr;
	unsigned long pa;
	struct page *page;

	dma_addr = address - vma->vm_start + agp->aperture.bus_base;
	pa = agp->ops->translate(agp, dma_addr);

	if (pa == (unsigned long)-EINVAL)
		return NULL;	/* no translation */

	/*
	 * Get the page, inc the use count, and return it
	 */
	page = virt_to_page(__va(pa));
	get_page(page);
	if (type)
		*type = VM_FAULT_MINOR;
	return page;
}
static s64 __uv_bios_call(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3,
			  u64 a4, u64 a5)
{
	struct uv_systab *tab = uv_systab;
	s64 ret;

	if (!tab || !tab->function)
		/*
		 * BIOS does not support UV systab
		 */
		return BIOS_STATUS_UNIMPLEMENTED;

	/*
	 * If EFI_OLD_MEMMAP is set, we need to fall back to using our old EFI
	 * callback method, which uses efi_call() directly, with the kernel
	 * page tables:
	 */
	if (unlikely(test_bit(EFI_OLD_MEMMAP, &efi.flags)))
		ret = efi_call((void *)__va(tab->function), (u64)which,
			       a1, a2, a3, a4, a5);
	else
		ret = efi_call_virt_pointer(tab, function, (u64)which,
					    a1, a2, a3, a4, a5);

	return ret;
}
/*
 * maps a range of physical memory into the requested pages. the old
 * mappings are removed. any references to nonexistent pages results
 * in null mappings (currently treated as "copy-on-access")
 */
static inline void remap_pte_range(pte_t *pte, unsigned long address,
				   unsigned long size, unsigned long phys_addr,
				   pgprot_t prot)
{
	unsigned long end;

	address &= ~PMD_MASK;
	end = address + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	do {
		struct page *page;
		pte_t oldpage;

		oldpage = ptep_get_and_clear(pte);

		page = virt_to_page(__va(phys_addr));
		if ((!VALID_PAGE(page)) || PageReserved(page))
			set_pte(pte, mk_pte_phys(phys_addr, prot));

		forget_pte(oldpage);
		address += PAGE_SIZE;
		phys_addr += PAGE_SIZE;
		pte++;
	} while (address && (address < end));
}
static void * __meminit altmap_alloc_block_buf(unsigned long size,
					       struct vmem_altmap *altmap)
{
	unsigned long pfn, nr_pfns;
	void *ptr;

	if (size & ~PAGE_MASK) {
		pr_warn_once("%s: allocations must be multiple of PAGE_SIZE (%ld)\n",
			     __func__, size);
		return NULL;
	}

	nr_pfns = size >> PAGE_SHIFT;
	pfn = vmem_altmap_alloc(altmap, nr_pfns);
	if (pfn < ULONG_MAX)
		ptr = __va(__pfn_to_phys(pfn));
	else
		ptr = NULL;
	pr_debug("%s: pfn: %#lx alloc: %ld align: %ld nr: %#lx\n",
		 __func__, pfn, altmap->alloc, altmap->align, nr_pfns);

	return ptr;
}
void __init reserve_ebda_region(void)
{
	unsigned int lowmem, ebda_addr;

	/* To determine the position of the EBDA and the */
	/* end of conventional memory, we need to look at */
	/* the BIOS data area. In a paravirtual environment */
	/* that area is absent. We'll just have to assume */
	/* that the paravirt case can handle memory setup */
	/* correctly, without our help. */
	if (paravirt_enabled())
		return;

	/* end of low (conventional) memory */
	lowmem = *(unsigned short *)__va(BIOS_LOWMEM_KILOBYTES);
	lowmem <<= 10;

	/* start of EBDA area */
	ebda_addr = get_bios_ebda();

	/* Fixup: bios puts an EBDA in the top 64K segment */
	/* of conventional memory, but does not adjust lowmem. */
	if ((lowmem - ebda_addr) <= 0x10000)
		lowmem = ebda_addr;

	/* Fixup: bios does not report an EBDA at all. */
	/* Some old Dells seem to need 4k anyhow (bugzilla 2990) */
	if ((ebda_addr == 0) && (lowmem >= 0x9f000))
		lowmem = 0x9f000;

	/* Paranoia: should never happen, but... */
	if ((lowmem == 0) || (lowmem >= 0x100000))
		lowmem = 0x9f000;

	/* reserve all memory between lowmem and the 1MB mark */
	memblock_reserve(lowmem, 0x100000 - lowmem);
}
void __init reserve_ebda_region(void)
{
	unsigned int lowmem, ebda_addr;

	/* To determine the position of the EBDA and the */
	/* end of conventional memory, we need to look at */
	/* the BIOS data area. In a paravirtual environment */
	/* that area is absent. We'll just have to assume */
	/* that the paravirt case can handle memory setup */
	/* correctly, without our help. */
	if (paravirt_enabled())
		return;

	/* end of low (conventional) memory */
	lowmem = *(unsigned short *)__va(BIOS_LOWMEM_KILOBYTES);
	lowmem <<= 10;

	/* start of EBDA area */
	ebda_addr = get_bios_ebda();

	/* Fixup: bios puts an EBDA in the top 64K segment */
	/* of conventional memory, but does not adjust lowmem. */
	if ((lowmem - ebda_addr) <= 0x10000)
		lowmem = ebda_addr;

	/* Fixup: bios does not report an EBDA at all. */
	/* Some old Dells seem to need 4k anyhow (bugzilla 2990) */
	if ((ebda_addr == 0) && (lowmem >= 0x9f000))
		lowmem = 0x9f000;

	/* Paranoia: should never happen, but... */
	if ((lowmem == 0) || (lowmem >= 0x100000))
		lowmem = 0x9f000;

	/* reserve all memory between lowmem and the 1MB mark */
	reserve_early_overlap_ok(lowmem, 0x100000, "BIOS reserved");
}
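/*
 * Illustrative sketch, not taken from the snippets above: both variants of
 * reserve_ebda_region() read the EBDA base via get_bios_ebda(). On x86 that
 * helper is commonly implemented along these lines, reading the real-mode
 * segment word at physical 0x40E in the BIOS data area and converting it to
 * a physical address. Shown only as a reference for what the call returns.
 */
static inline unsigned int get_bios_ebda(void)
{
	/* BDA word at 0x40E holds the EBDA segment */
	unsigned int address = *(unsigned short *)phys_to_virt(0x40E);

	address <<= 4;		/* real-mode segment -> physical address */
	return address;		/* 0 means the BIOS reported no EBDA */
}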
static void __ref init_fallback_flush(void)
{
	u64 l1d_size, limit;
	int cpu;

	/* Only allocate the fallback flush area once (at boot time). */
	if (l1d_flush_fallback_area)
		return;

	l1d_size = ppc64_caches.l1d.size;

	/*
	 * If there is no d-cache-size property in the device tree, l1d_size
	 * could be zero. That leads to the loop in the asm wrapping around to
	 * 2^64-1, and then walking off the end of the fallback area and
	 * eventually causing a page fault which is fatal. Just default to
	 * something vaguely sane.
	 */
	if (!l1d_size)
		l1d_size = (64 * 1024);

	limit = min(ppc64_bolted_size(), ppc64_rma_size);

	/*
	 * Align to L1d size, and size it at 2x L1d size, to catch possible
	 * hardware prefetch runoff. We don't have a recipe for load patterns
	 * to reliably avoid the prefetcher.
	 */
	l1d_flush_fallback_area = __va(memblock_alloc_base(l1d_size * 2,
							   l1d_size, limit));
	memset(l1d_flush_fallback_area, 0, l1d_size * 2);

	for_each_possible_cpu(cpu) {
		struct paca_struct *paca = paca_ptrs[cpu];
		paca->rfi_flush_fallback_area = l1d_flush_fallback_area;
		paca->l1d_flush_size = l1d_size;
	}
}
static void nlm_init_bootmem_node(unsigned long mapstart,
				  unsigned long min_pfn, unsigned long max_pfn)
{
	int i;

	for (i = 0; i < NLM_MAX_CPU_NODE; i++) {
		unsigned long map_pfn, start_pfn, end_pfn, bootmem_size;
		int j;

		if (!node_online(i))
			continue;

		start_pfn = NODE_MEM_DATA(i)->low_pfn;
		end_pfn = NODE_MEM_DATA(i)->high_pfn;

		if (start_pfn && start_pfn < min_pfn)
			start_pfn = min_pfn;
		if (end_pfn > max_pfn)
			end_pfn = max_pfn;

		/* in general, never hit the condition */
		if (start_pfn && start_pfn >= end_pfn) {
			NODE_MEM_DATA(i)->map_pfn = 0; /* indicate a bad map_pfn */
			continue;
		}

		if (start_pfn > mapstart)
			map_pfn = start_pfn;
		else
			map_pfn = mapstart;

		if ((start_pfn == 0) && (end_pfn == 0)) {
			map_pfn = _low_virt_to_phys(&_node_map_mem[i][0]) >> PAGE_SHIFT;
			__node_data[i] = __va(map_pfn << PAGE_SHIFT);
		} else {
/*
 * Find out what kind of machine we're on and save any data we need
 * from the early boot process (devtree is copied on pmac by prom_init()).
 * This is called very early on the boot process, after a minimal
 * MMU environment has been set up but before MMU_init is called.
 */
notrace void __init machine_init(unsigned long dt_ptr, unsigned long phys)
{
	/* Enable early debugging if any specified (see udbg.h) */
	udbg_early_init();

	/* Do some early initialization based on the flat device tree */
	early_init_devtree(__va(dt_ptr));

	probe_machine();

#ifdef CONFIG_6xx
	if (cpu_has_feature(CPU_FTR_CAN_DOZE) ||
	    cpu_has_feature(CPU_FTR_CAN_NAP))
		ppc_md.power_save = ppc6xx_idle;
#endif

#ifdef CONFIG_E500
	if (cpu_has_feature(CPU_FTR_CAN_DOZE) ||
	    cpu_has_feature(CPU_FTR_CAN_NAP))
		ppc_md.power_save = e500_idle;
#endif
	if (ppc_md.progress)
		ppc_md.progress("id mach(): done", 0x200);
}
static int __init
acpi_parse_madt(unsigned long phys_addr, unsigned long size)
{
	if (!phys_addr || !size)
		return -EINVAL;

	acpi_madt = (struct acpi_table_madt *)__va(phys_addr);

	/* remember the value for reference after free_initmem() */
#ifdef CONFIG_ITANIUM
	has_8259 = 1;	/* Firmware on old Itanium systems is broken */
#else
	has_8259 = acpi_madt->flags.pcat_compat;
#endif
	iosapic_system_init(has_8259);

	/* Get base address of IPI Message Block */
	if (acpi_madt->lapic_address)
		ipi_base_addr = (unsigned long)
			ioremap(acpi_madt->lapic_address, 0);

	printk(KERN_INFO PREFIX "Local APIC address 0x%lx\n", ipi_base_addr);

	return 0;
}
static __ref void *early_alloc_pgtable(unsigned long size, int nid,
				       unsigned long region_start,
				       unsigned long region_end)
{
	unsigned long pa = 0;
	void *pt;

	if (region_start || region_end) /* has region hint */
		pa = memblock_alloc_range(size, size, region_start, region_end,
					  MEMBLOCK_NONE);
	else if (nid != -1) /* has node hint */
		pa = memblock_alloc_base_nid(size, size,
					     MEMBLOCK_ALLOC_ANYWHERE,
					     nid, MEMBLOCK_NONE);

	if (!pa)
		pa = memblock_alloc_base(size, size, MEMBLOCK_ALLOC_ANYWHERE);

	BUG_ON(!pa);

	pt = __va(pa);
	memset(pt, 0, size);

	return pt;
}
void check_for_bios_corruption(void)
{
	int i;
	int corruption = 0;

	if (!memory_corruption_check)
		return;

	for (i = 0; i < num_scan_areas; i++) {
		unsigned long *addr = __va(scan_areas[i].addr);
		unsigned long size = scan_areas[i].size;

		for (; size; addr++, size -= sizeof(unsigned long)) {
			if (!*addr)
				continue;
			printk(KERN_ERR "Corrupted low memory at %p (%lx phys) = %08lx\n",
			       addr, __pa(addr), *addr);
			corruption = 1;
			*addr = 0;
		}
	}

	WARN_ONCE(corruption, KERN_ERR "Memory corruption detected in low memory\n");
}
void msm_iommu_pagetable_free_tables(struct msm_iommu_pt *pt, unsigned long va,
				     size_t len)
{
	/*
	 * Adding 2 for worst case. We could be spanning 3 second level pages
	 * if we unmapped just over 1MB.
	 */
	u32 n_entries = len / SZ_1M + 2;
	u32 fl_offset = FL_OFFSET(va);
	u32 i;

	for (i = 0; i < n_entries && fl_offset < NUM_FL_PTE; ++i) {
		u32 *fl_pte_shadow = pt->fl_table_shadow + fl_offset;
		void *sl_table_va = __va(((*fl_pte_shadow) & ~0x1FF));
		u32 sl_table = *fl_pte_shadow;

		if (sl_table && !(sl_table & 0x1FF)) {
			free_pages((unsigned long)sl_table_va,
				   get_order(SZ_4K));
			*fl_pte_shadow = 0;
		}
		++fl_offset;
	}
}
static void gnttab_pre_unmap_grant_ref(struct gnttab_unmap_grant_ref *unmap,
				       int count)
{
	long slot;
	int i;
	ulong ea;
	unsigned long dummy1, dummy2;
	ulong flags;

	/* paranoia */
	local_irq_save(flags);

	for (i = 0; i < count; i++) {
		struct page *page;

		ea = (ulong)__va(unmap[i].host_addr);
		page = virt_to_page(ea);

		if (!gnt_unmap(page)) {
			DBG("%s[0x%x]: skip: 0x%lx, mapcount 0x%x\n",
			    __func__, i, ea, gnt_mapcount(page));
			continue;
		}
		slot = find_map_slot(ea);
		if (slot < 0) {
			printk(KERN_EMERG "%s: PTE not found: 0x%lx\n",
			       __func__, ea);
			continue;
		}

		DBG("%s[0x%x]: 0x%lx: mapcount: 0x%x\n",
		    __func__, i, ea, gnt_mapcount(page));
		plpar_pte_remove(0, slot, 0, &dummy1, &dummy2);
	}
	local_irq_restore(flags);
}
void manage_entry(int level, int index, void *entry)
{
	void *control_bit;
	void *address;
	void *real_address_pa;
	void *real_address_va;

	control_bit = (void *)((ulong)entry & 0x0000000000000fff);
	address = (void *)((ulong)entry & 0xfffffffffffff000);

	real_address_pa = address;
	real_address_va = __va(real_address_pa);

	if (!((ulong)control_bit ^ 0x0000000000000061))
		return;

	switch (level) {
	case 0:
		AUDIT_ENTRY_PML4E printk(KERN_ERR "\tPML4E_%d: %p \t Address:%p \t Control_bit:%p\n",
					 index, entry, address, control_bit);
		AUDIT_ENTRY_PML4E printk(KERN_ERR "\t\t\t\t\t PA:%p \t\t VA:%p\n",
					 real_address_pa, real_address_va);
		break;
	case 1:
		AUDIT_ENTRY_PDPTE printk(KERN_ERR "\t\tPDPTE_%d: %p \t Address:%p \t Control_bit:%p\n",
					 index, entry, address, control_bit);
		AUDIT_ENTRY_PDPTE printk(KERN_ERR "\t\t\t\t\t\t PA:%p \t\t VA:%p\n",
					 real_address_pa, real_address_va);
		break;
	case 2:
		AUDIT_ENTRY_PDE printk(KERN_ERR "\t\t\tPDE_%d: %p \t Address:%p \t Control_bit:%p\n",
				       index, entry, address, control_bit);
		AUDIT_ENTRY_PDE printk(KERN_ERR "\t\t\t\t\t\t\t PA:%p \t\t VA:%p\n",
				       real_address_pa, real_address_va);
		break;
	case 3:
		AUDIT_ENTRY_PTE printk(KERN_ERR "\t\t\t\tPTE_%d: %p \t Address:%p \t Control_bit:%p\n",
				       index, entry, address, control_bit);
		AUDIT_ENTRY_PTE printk(KERN_ERR "\t\t\t\t\t\t\t\t PA:%p \t\t VA:%p\n",
				       real_address_pa, real_address_va);
		break;
	}

	if (level == 3)
		return;

	walk_table(level + 1, index, real_address_va);
}
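/*
 * Illustrative sketch, not part of the walker above: manage_entry() and
 * walk_table() descend one paging level per call (PML4E -> PDPTE -> PDE ->
 * PTE). For reference, this is how a 48-bit x86-64 virtual address splits
 * into the four 9-bit table indices that such a walk follows. The function
 * name is hypothetical; only the shift/mask arithmetic is the point.
 */
static void decompose_va_example(unsigned long va)
{
	unsigned int pml4_idx = (va >> 39) & 0x1ff;	/* level 0 */
	unsigned int pdpt_idx = (va >> 30) & 0x1ff;	/* level 1 */
	unsigned int pd_idx   = (va >> 21) & 0x1ff;	/* level 2 */
	unsigned int pt_idx   = (va >> 12) & 0x1ff;	/* level 3 */
	unsigned int offset   = va & 0xfff;		/* byte offset in page */

	printk(KERN_ERR "VA %#lx -> PML4E %u, PDPTE %u, PDE %u, PTE %u, off %#x\n",
	       va, pml4_idx, pdpt_idx, pd_idx, pt_idx, offset);
}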
/*
 * Function to restore the table entry that
 * was modified for enabling MMU
 */
static void restore_mmu_table_entry(void)
{
	u32 *scratchpad_address;
	u32 previous_value, control_reg_value;
	u32 *address;

	/*
	 * Get address of entry that was modified
	 */
	scratchpad_address = sar_ram_base + MMU_OFFSET;
	address = (u32 *)readl(scratchpad_address + TABLE_ADDRESS_OFFSET);

	/*
	 * Get the previous value which needs to be restored
	 */
	previous_value = readl(scratchpad_address + TABLE_VALUE_OFFSET);
	address = __va(address);
	*address = previous_value;
	flush_tlb_all();

	control_reg_value = readl(scratchpad_address + CR_VALUE_OFFSET);
	/*
	 * Restore the Control register
	 */
	set_cr(control_reg_value);
}
/*
 * MMU_init_hw does the chip-specific initialization of the MMU hardware.
 */
void __init MMU_init_hw(void)
{
	/* PIN up to the 3 first 8Mb after IMMR in DTLB table */
#ifdef CONFIG_PIN_TLB_DATA
	unsigned long ctr = mfspr(SPRN_MD_CTR) & 0xfe000000;
	unsigned long flags = 0xf0 | MD_SPS16K | _PAGE_PRIVILEGED | _PAGE_DIRTY;
#ifdef CONFIG_PIN_TLB_IMMR
	int i = 29;
#else
	int i = 28;
#endif
	unsigned long addr = 0;
	unsigned long mem = total_lowmem;

	for (; i < 32 && mem >= LARGE_PAGE_SIZE_8M; i++) {
		mtspr(SPRN_MD_CTR, ctr | (i << 8));
		mtspr(SPRN_MD_EPN, (unsigned long)__va(addr) | MD_EVALID);
		mtspr(SPRN_MD_TWC, MD_PS8MEG | MD_SVALID);
		mtspr(SPRN_MD_RPN, addr | flags | _PAGE_PRESENT);
		addr += LARGE_PAGE_SIZE_8M;
		mem -= LARGE_PAGE_SIZE_8M;
	}
#endif
}
void __init efi_call_phys_prelog(void)
{
	unsigned long vaddress;
	int pgd;
	int n_pgds;

	early_code_mapping_set_exec(1);
	local_irq_save(efi_flags);

	n_pgds = DIV_ROUND_UP((max_pfn << PAGE_SHIFT), PGDIR_SIZE);
	save_pgd = kmalloc(n_pgds * sizeof(pgd_t), GFP_KERNEL);

	for (pgd = 0; pgd < n_pgds; pgd++) {
		save_pgd[pgd] = *pgd_offset_k(pgd * PGDIR_SIZE);
		vaddress = (unsigned long)__va(pgd * PGDIR_SIZE);
		set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), *pgd_offset_k(vaddress));
	}

#ifdef CONFIG_PAX_PER_CPU_PGD
	load_cr3(swapper_pg_dir);
#endif

	__flush_tlb_all();
}
void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
{
	return __va(memblock_alloc(size, align));
}
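/*
 * Illustrative sketch, an assumption rather than code from the tree above:
 * memblock_alloc() returns a physical address, and __va() turns it into a
 * directly usable kernel pointer. On architectures that keep a simple linear
 * map of RAM starting at physical address 0 (x86-32 being one example), the
 * __va()/__pa() conversions used throughout these snippets reduce to a
 * constant offset by PAGE_OFFSET, roughly like the hypothetical helpers
 * below. Architectures with a non-zero RAM base or a debug __pa differ.
 */
static inline void *linear_map_va(unsigned long pa)
{
	return (void *)(pa + PAGE_OFFSET);		/* phys -> kernel virtual */
}

static inline unsigned long linear_map_pa(const void *va)
{
	return (unsigned long)va - PAGE_OFFSET;		/* kernel virtual -> phys */
}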
static int oeth_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct oeth_private *cep = (struct oeth_private *)dev->priv;
	volatile oeth_bd *bdp;
	unsigned long flags;

	D(printk("X"));

	/* Fill in a Tx ring entry */
	bdp = cep->tx_bd_base + cep->tx_next;

	if (cep->tx_full) {
		/* All transmit buffers are full.  Bail out. */
		printk("%s: tx queue full!.\n", dev->name);
		return 1;
	}

	/* Clear all of the status flags. */
	bdp->len_status &= ~OETH_TX_BD_STATS;

	/* If the frame is short, tell CPM to pad it. */
	if (skb->len <= ETH_ZLEN)
		bdp->len_status |= OETH_TX_BD_PAD;
	else
		bdp->len_status &= ~OETH_TX_BD_PAD;

#ifdef DEBUG
	_print("TX\n");
	oeth_print_packet((unsigned long)skb->data, skb->len);
#endif

#ifdef TXBUFF_PREALLOC
	/* Copy data in preallocated buffer */
	if (skb->len > OETH_TX_BUFF_SIZE) {
		printk("%s: tx frame too long!.\n", dev->name);
		return 1;
	} else
		memcpy((unsigned char *)__va(bdp->addr), skb->data, skb->len);

	bdp->len_status = (bdp->len_status & 0x0000ffff) | (skb->len << 16);

	dev_kfree_skb(skb);
#else
	/* Set buffer length and buffer pointer. */
	bdp->len_status = (bdp->len_status & 0x0000ffff) | (skb->len << 16);
	bdp->addr = (uint)__pa(skb->data);

	/* Save skb pointer. */
	cep->tx_skbuff[cep->tx_next] = skb;
#endif

	cep->tx_next = (cep->tx_next + 1) & OETH_TXBD_NUM_MASK;

	local_irq_save(flags);

	if (cep->tx_next == cep->tx_last)
		cep->tx_full = 1;

	/* Send it on its way.  Tell controller its ready, interrupt when done,
	 * and to put the CRC on the end.
	 */
	bdp->len_status |= (OETH_TX_BD_READY | OETH_TX_BD_IRQ | OETH_TX_BD_CRC);

	dev->trans_start = jiffies;

	local_irq_restore(flags);

	return 0;
}
static void oeth_rx(struct net_device *dev)
{
	struct oeth_private *cep;
	volatile oeth_bd *bdp;
	struct sk_buff *skb;
	int pkt_len;
	int bad = 0;
#ifndef RXBUFF_PREALLOC
	struct sk_buff *small_skb;
#endif

	D(printk("r"));

	cep = (struct oeth_private *)dev->priv;

	/* First, grab all of the stats for the incoming packet.
	 * These get messed up if we get called due to a busy condition.
	 */
	for (;; cep->rx_cur = (cep->rx_cur + 1) & OETH_RXBD_NUM_MASK) {

		bdp = cep->rx_bd_base + cep->rx_cur;

#ifndef RXBUFF_PREALLOC
		skb = cep->rx_skbuff[cep->rx_cur];

		if (skb == NULL) {

			skb = dev_alloc_skb(MAX_FRAME_SIZE + 2);

			if (skb != NULL) {
				bdp->addr = (unsigned long)skb->tail;
				bdp->len_status |= OETH_RX_BD_EMPTY;
			}

			skb_reserve(skb, 2);

			continue;
		}
#endif

		if (bdp->len_status & OETH_RX_BD_EMPTY)
			break;

		/* Check status for errors. */
		if (bdp->len_status & (OETH_RX_BD_TOOLONG | OETH_RX_BD_SHORT)) {
			cep->stats.rx_length_errors++;
			bad = 1;
		}
		if (bdp->len_status & OETH_RX_BD_DRIBBLE) {
			cep->stats.rx_frame_errors++;
			bad = 1;
		}
		if (bdp->len_status & OETH_RX_BD_CRCERR) {
			cep->stats.rx_crc_errors++;
			bad = 1;
		}
		if (bdp->len_status & OETH_RX_BD_OVERRUN) {
			cep->stats.rx_crc_errors++;
			bad = 1;
		}
		if (bdp->len_status & OETH_RX_BD_MISS) {
		}
		if (bdp->len_status & OETH_RX_BD_LATECOL) {
			cep->stats.rx_frame_errors++;
			bad = 1;
		}

		if (bad) {
			bdp->len_status &= ~OETH_RX_BD_STATS;
			bdp->len_status |= OETH_RX_BD_EMPTY;
			continue;
		}

		/* Process the incoming frame. */
		pkt_len = bdp->len_status >> 16;

#ifdef RXBUFF_PREALLOC
		skb = dev_alloc_skb(pkt_len + 2);

		if (skb == NULL) {
			printk("%s: Memory squeeze, dropping packet.\n",
			       dev->name);
			cep->stats.rx_dropped++;
		} else {
			skb_reserve(skb, 2);	/* longword align L3 header */
			skb->dev = dev;
#ifdef DEBUG
			_print("RX\n");
			oeth_print_packet((unsigned long)__va(bdp->addr), pkt_len);
#endif
			memcpy(skb_put(skb, pkt_len),
			       (unsigned char *)__va(bdp->addr), pkt_len);
			skb->protocol = eth_type_trans(skb, dev);
			netif_rx(skb);
			cep->stats.rx_packets++;
		}

		bdp->len_status &= ~OETH_RX_BD_STATS;
		bdp->len_status |= OETH_RX_BD_EMPTY;
#else
		if (pkt_len < 128) {

			small_skb = dev_alloc_skb(pkt_len);

			if (small_skb) {
				small_skb->dev = dev;
#if DEBUG
				_print("RX short\n");
				oeth_print_packet(__va(bdp->addr),
						  bdp->len_status >> 16);
#endif
				memcpy(skb_put(small_skb, pkt_len),
				       (unsigned char *)__va(bdp->addr), pkt_len);
				small_skb->protocol = eth_type_trans(small_skb, dev);
				netif_rx(small_skb);
				cep->stats.rx_packets++;
			} else {
				printk("%s: Memory squeeze, dropping packet.\n",
				       dev->name);
				cep->stats.rx_dropped++;
			}

			bdp->len_status &= ~OETH_RX_BD_STATS;
			bdp->len_status |= OETH_RX_BD_EMPTY;
		}
/**
 * free_bootmem_node - mark a page range as usable
 * @pgdat: node the range resides on
 * @physaddr: starting address of the range
 * @size: size of the range in bytes
 *
 * Partial pages will be considered reserved and left as they are.
 *
 * The range must reside completely on the specified node.
 */
void __init free_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
			      unsigned long size)
{
	kmemleak_free_part(__va(physaddr), size);
	memblock_x86_free_range(physaddr, physaddr + size);
}
void __init early_setup(unsigned long dt_ptr)
{
	static __initdata struct paca_struct boot_paca;

	/* -------- printk is _NOT_ safe to use here ! ------- */

	/* Try new device tree based feature discovery ... */
	if (!dt_cpu_ftrs_init(__va(dt_ptr)))
		/* Otherwise use the old style CPU table */
		identify_cpu(0, mfspr(SPRN_PVR));

	/* Assume we're on cpu 0 for now. Don't write to the paca yet! */
	initialise_paca(&boot_paca, 0);
	setup_paca(&boot_paca);
	fixup_boot_paca();

	/* -------- printk is now safe to use ------- */

	/* Enable early debugging if any specified (see udbg.h) */
	udbg_early_init();

	DBG(" -> early_setup(), dt_ptr: 0x%lx\n", dt_ptr);

	/*
	 * Do early initialization using the flattened device
	 * tree, such as retrieving the physical memory map or
	 * calculating/retrieving the hash table size.
	 */
	early_init_devtree(__va(dt_ptr));

	/* Now we know the logical id of our boot cpu, setup the paca. */
	if (boot_cpuid != 0) {
		/* Poison paca_ptrs[0] again if it's not the boot cpu */
		memset(&paca_ptrs[0], 0x88, sizeof(paca_ptrs[0]));
	}
	setup_paca(paca_ptrs[boot_cpuid]);
	fixup_boot_paca();

	/*
	 * Configure exception handlers. This includes setting up trampolines
	 * if needed, setting exception endian mode, etc...
	 */
	configure_exceptions();

	/* Apply all the dynamic patching */
	apply_feature_fixups();
	setup_feature_keys();

	/* Initialize the hash table or TLB handling */
	early_init_mmu();

	/*
	 * After firmware and early platform setup code has set things up,
	 * we note the SPR values for configurable control/performance
	 * registers, and use those as initial defaults.
	 */
	record_spr_defaults();

	/*
	 * At this point, we can let interrupts switch to virtual mode
	 * (the MMU has been setup), so adjust the MSR in the PACA to
	 * have IR and DR set and enable AIL if it exists
	 */
	cpu_ready_for_interrupts();

	/*
	 * We enable ftrace here, but since we only support DYNAMIC_FTRACE, it
	 * will only actually get enabled on the boot cpu much later once
	 * ftrace itself has been initialized.
	 */
	this_cpu_enable_ftrace();

	DBG(" <- early_setup()\n");

#ifdef CONFIG_PPC_EARLY_DEBUG_BOOTX
	/*
	 * This needs to be done *last* (after the above DBG() even)
	 *
	 * Right after we return from this function, we turn on the MMU
	 * which means the real-mode access trick that btext does will
	 * no longer work, it needs to switch to using a real MMU
	 * mapping. This call will ensure that it does
	 */
	btext_map();
#endif /* CONFIG_PPC_EARLY_DEBUG_BOOTX */
}
/**
 * x86_acpi_suspend_lowlevel - save kernel state
 *
 * Create an identity mapped page table and copy the wakeup routine to
 * low memory.
 */
int x86_acpi_suspend_lowlevel(void)
{
	struct wakeup_header *header =
		(struct wakeup_header *) __va(real_mode_header->wakeup_header);

	if (header->signature != WAKEUP_HEADER_SIGNATURE) {
		printk(KERN_ERR "wakeup header does not match\n");
		return -EINVAL;
	}

	header->video_mode = saved_video_mode;

	header->pmode_behavior = 0;

#ifndef CONFIG_64BIT
	native_store_gdt((struct desc_ptr *)&header->pmode_gdt);

	/*
	 * We have to check that we can write back the value, and not
	 * just read it.  At least on 90 nm Pentium M (Family 6, Model
	 * 13), reading an invalid MSR is not guaranteed to trap, see
	 * Erratum X4 in "Intel Pentium M Processor on 90 nm Process
	 * with 2-MB L2 Cache and Intel® Processor A100 and A110 on 90
	 * nm process with 512-KB L2 Cache Specification Update".
	 */
	if (!rdmsr_safe(MSR_EFER,
			&header->pmode_efer_low,
			&header->pmode_efer_high) &&
	    !wrmsr_safe(MSR_EFER,
			header->pmode_efer_low,
			header->pmode_efer_high))
		header->pmode_behavior |= (1 << WAKEUP_BEHAVIOR_RESTORE_EFER);
#endif /* !CONFIG_64BIT */

	header->pmode_cr0 = read_cr0();
	if (__this_cpu_read(cpu_info.cpuid_level) >= 0) {
		header->pmode_cr4 = read_cr4();
		header->pmode_behavior |= (1 << WAKEUP_BEHAVIOR_RESTORE_CR4);
	}
	if (!rdmsr_safe(MSR_IA32_MISC_ENABLE,
			&header->pmode_misc_en_low,
			&header->pmode_misc_en_high) &&
	    !wrmsr_safe(MSR_IA32_MISC_ENABLE,
			header->pmode_misc_en_low,
			header->pmode_misc_en_high))
		header->pmode_behavior |=
			(1 << WAKEUP_BEHAVIOR_RESTORE_MISC_ENABLE);
	header->realmode_flags = acpi_realmode_flags;
	header->real_magic = 0x12345678;

#ifndef CONFIG_64BIT
	header->pmode_entry = (u32)&wakeup_pmode_return;
	header->pmode_cr3 = (u32)__pa_symbol(initial_page_table);
	saved_magic = 0x12345678;
#else /* CONFIG_64BIT */
#ifdef CONFIG_SMP
	stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
	early_gdt_descr.address =
			(unsigned long)get_cpu_gdt_table(smp_processor_id());
	initial_gs = per_cpu_offset(smp_processor_id());
#endif
	initial_code = (unsigned long)wakeup_long64;
	saved_magic = 0x123456789abcdef0L;
#endif /* CONFIG_64BIT */

	do_suspend_lowlevel();
	return 0;
}
void __init microcode_scan_module(
    unsigned long *module_map,
    const multiboot_info_t *mbi,
    void *(*bootmap)(const module_t *))
{
    module_t *mod = (module_t *)__va(mbi->mods_addr);
    uint64_t *_blob_start;
    unsigned long _blob_size;
    struct cpio_data cd;
    long offset;
    const char *p = NULL;
    int i;

    ucode_blob.size = 0;
    if ( !ucode_scan )
        return;

    if ( boot_cpu_data.x86_vendor == X86_VENDOR_AMD )
        p = "kernel/x86/microcode/AuthenticAMD.bin";
    else if ( boot_cpu_data.x86_vendor == X86_VENDOR_INTEL )
        p = "kernel/x86/microcode/GenuineIntel.bin";
    else
        return;

    /*
     * Try all modules and see whichever could be the microcode blob.
     */
    for ( i = 1 /* Ignore dom0 kernel */; i < mbi->mods_count; i++ )
    {
        if ( !test_bit(i, module_map) )
            continue;

        _blob_start = bootmap(&mod[i]);
        _blob_size = mod[i].mod_end;
        if ( !_blob_start )
        {
            printk("Could not map multiboot module #%d (size: %ld)\n",
                   i, _blob_size);
            continue;
        }
        cd.data = NULL;
        cd.size = 0;
        cd = find_cpio_data(p, _blob_start, _blob_size, &offset /* ignore */);
        if ( cd.data )
        {
            /*
             * This is an arbitrary check - it would be sad if the blob
             * consumed most of the memory and did not allow guests
             * to launch.
             */
            if ( cd.size > MAX_EARLY_CPIO_MICROCODE )
            {
                printk("Multiboot %d microcode payload too big! (%ld, we can do %d)\n",
                       i, cd.size, MAX_EARLY_CPIO_MICROCODE);
                goto err;
            }
            ucode_blob.size = cd.size;
            ucode_blob.data = xmalloc_bytes(cd.size);
            if ( !ucode_blob.data )
                cd.data = NULL;
            else
                memcpy(ucode_blob.data, cd.data, cd.size);
        }
        bootmap(NULL);
        if ( cd.data )
            break;
    }
    return;
err:
    bootmap(NULL);
}
/**
 * free_bootmem - mark a page range as usable
 * @addr: starting address of the range
 * @size: size of the range in bytes
 *
 * Partial pages will be considered reserved and left as they are.
 *
 * The range must be contiguous but may span node boundaries.
 */
void __init free_bootmem(unsigned long addr, unsigned long size)
{
	kmemleak_free_part(__va(addr), size);
	memblock_x86_free_range(addr, addr + size);
}