/*
 * Flush-and-invalidate (finv) one page's data out of the cache of its
 * home cpu.  The page is temporarily mapped into a per-cpu kernel
 * fixmap slot with a PTE homed like the page, the finv is issued
 * through that mapping, and the mapping is then torn down again.
 * Interrupts are disabled for the whole sequence so this cpu's fixmap
 * slot cannot be reused while it is in flight.
 */
void homecache_finv_map_page(struct page *page, int home)
{
	unsigned long flags;
	unsigned long va;
	pte_t *ptep;
	pte_t pte;

	/* An uncached page has no cached copies anywhere to flush. */
	if (home == PAGE_HOME_UNCACHED)
		return;
	local_irq_save(flags);
#ifdef CONFIG_HIGHMEM
	/* Borrow a per-cpu kmap_atomic fixmap slot for the mapping. */
	va = __fix_to_virt(FIX_KMAP_BEGIN + kmap_atomic_idx_push() +
			   (KM_TYPE_NR * smp_processor_id()));
#else
	/* Use the dedicated per-cpu homecache fixmap slot. */
	va = __fix_to_virt(FIX_HOMECACHE_BEGIN + smp_processor_id());
#endif
	/* Install a kernel PTE for the page, homed where the page is. */
	ptep = virt_to_kpte(va);
	pte = pfn_pte(page_to_pfn(page), PAGE_KERNEL);
	__set_pte(ptep, pte_set_home(pte, home));
	/* Perform the actual flush-and-invalidate via the temporary VA. */
	homecache_finv_page_va((void *)va, home);
	/* Tear down the mapping and drop the stale TLB translation. */
	__pte_clear(ptep);
	hv_flush_page(va, PAGE_SIZE);
#ifdef CONFIG_HIGHMEM
	kmap_atomic_idx_pop();
#endif
	local_irq_restore(flags);
}
/*
 * Store a PTE, first forcing its home to match the home already
 * recorded for the underlying page.  On 32-bit tile the two halves of
 * the PTE must be written in a specific order; see the comment below.
 * Note the "order" argument is not used by this implementation.
 */
void set_pte_order(pte_t *ptep, pte_t pte, int order)
{
	unsigned long pfn = pte_pfn(pte);
	struct page *page = pfn_to_page(pfn);

	/* Update the home of a PTE if necessary */
	pte = pte_set_home(pte, page_home(page));

#ifdef __tilegx__
	/* A single 64-bit store installs the whole PTE at once. */
	*ptep = pte;
#else
	/*
	 * When setting a PTE, write the high bits first, then write
	 * the low bits.  This sets the "present" bit only after the
	 * other bits are in place.  If a particular PTE update
	 * involves transitioning from one valid PTE to another, it
	 * may be necessary to call set_pte_order() more than once,
	 * transitioning via a suitable intermediate state.
	 * Note that this sequence also means that if we are transitioning
	 * from any migrating PTE to a non-migrating one, we will not
	 * see a half-updated PTE with the migrating bit off.
	 */
#if HV_PTE_INDEX_PRESENT >= 32 || HV_PTE_INDEX_MIGRATING >= 32
# error Must write the present and migrating bits last
#endif
	((u32 *)ptep)[1] = (u32)(pte_val(pte) >> 32);
	/* Compiler barrier: keep the low-word store after the high word. */
	barrier();
	((u32 *)ptep)[0] = (u32)(pte_val(pte));
#endif
}
/*
 * Point a middle-level page directory entry at an L2 page table.
 * The table's physical address is encoded as a hypervisor page-table
 * frame number (ptfn) inside a _PAGE_TABLE pte homed on the initial
 * heap home, and the result is verified by decoding it back.
 */
static void __init assign_pte(pmd_t *pmd, pte_t *page_table)
{
	phys_addr_t table_pa = __pa(page_table);
	pte_t entry;

	/* The hypervisor requires page tables to be suitably aligned. */
	BUG_ON((table_pa & (HV_PAGE_TABLE_ALIGN-1)) != 0);

	entry = hv_pte_set_ptfn(__pgprot(_PAGE_TABLE),
				table_pa >> HV_LOG2_PAGE_TABLE_ALIGN);
	entry = pte_set_home(entry, initial_heap_home());
	*(pte_t *)pmd = entry;

	/* Sanity check: the installed entry must decode to the table. */
	BUG_ON(page_table != (pte_t *)pmd_page_vaddr(*pmd));
}
/* Combine a generic pgprot_t with cache home to get a cache-aware pgprot. */
static pgprot_t __init construct_pgprot(pgprot_t prot, int home)
{
	pgprot_t result = pte_set_home(prot, home);

	/*
	 * For immutable pages, pick the L3 caching mode to match how
	 * kernel text is cached: hash-for-home if ktext_hash is set,
	 * otherwise no L3 caching.
	 */
	if (home == PAGE_HOME_IMMUTABLE)
		result = hv_pte_set_mode(result,
					 ktext_hash ?
					 HV_PTE_MODE_CACHE_HASH_L3 :
					 HV_PTE_MODE_CACHE_NO_L3);

	return result;
}
/*
 * Change the cache home of a run of 2^order small pages by rewriting
 * their kernel PTEs.  All cached copies are evicted from every
 * cacheable cpu first so no stale data remains under the old home.
 * Only applies to lowmem pages that are unreferenced and unmapped.
 */
void homecache_change_page_home(struct page *page, int order, int home)
{
	int i, pages = (1 << order);
	unsigned long kva;

	/* Must be a lowmem page nobody else holds or has mapped. */
	BUG_ON(PageHighMem(page));
	BUG_ON(page_count(page) > 1);
	BUG_ON(page_mapcount(page) != 0);
	kva = (unsigned long) page_address(page);
	/* Evict any cached copies on all online cpus before re-homing. */
	flush_remote(0, HV_FLUSH_EVICT_L2, &cpu_cacheable_map,
		     kva, pages * PAGE_SIZE, PAGE_SIZE, cpu_online_mask,
		     NULL, 0);
	/* Rewrite each small-page kernel PTE with the new home. */
	for (i = 0; i < pages; ++i, kva += PAGE_SIZE) {
		pte_t *ptep = virt_to_pte(NULL, kva);
		pte_t pteval = *ptep;
		BUG_ON(!pte_present(pteval) || pte_huge(pteval));
		*ptep = pte_set_home(pteval, home);
	}
}
/*
 * Probe the tilegx OHCI host controller: initialize the GXIO USB
 * context, create the hcd, allocate and configure the interrupt,
 * register client memory with the hypervisor, and hand the hcd to the
 * USB core.  On failure, unwinds via the err_* labels in reverse
 * acquisition order.  Returns 0 on success or a negative errno.
 */
static int ohci_hcd_tilegx_drv_probe(struct platform_device *pdev)
{
	struct usb_hcd *hcd;
	struct tilegx_usb_platform_data *pdata = dev_get_platdata(&pdev->dev);
	pte_t pte = { 0 };
	int my_cpu = smp_processor_id();
	int ret;

	if (usb_disabled())
		return -ENODEV;

	/*
	 * Try to initialize our GXIO context; if we can't, the device
	 * doesn't exist.
	 */
	if (gxio_usb_host_init(&pdata->usb_ctx, pdata->dev_index, 0) != 0)
		return -ENXIO;

	hcd = usb_create_hcd(&ohci_tilegx_hc_driver, &pdev->dev,
			     dev_name(&pdev->dev));
	if (!hcd) {
		ret = -ENOMEM;
		goto err_hcd;
	}

	/*
	 * We don't use rsrc_start to map in our registers, but seems like
	 * we ought to set it to something, so we use the register VA.
	 */
	hcd->rsrc_start =
		(ulong) gxio_usb_host_get_reg_start(&pdata->usb_ctx);
	hcd->rsrc_len = gxio_usb_host_get_reg_len(&pdata->usb_ctx);
	hcd->regs = gxio_usb_host_get_reg_start(&pdata->usb_ctx);

	tilegx_start_ohc();

	/* Create our IRQs and register them. */
	/*
	 * NOTE(review): this uses the legacy create_irq()/destroy_irq()
	 * API, while the EHCI probe in this source uses
	 * irq_alloc_hwirq()/irq_free_hwirq(); consider converting for
	 * consistency.
	 */
	pdata->irq = create_irq();
	if (pdata->irq < 0) {
		ret = -ENXIO;
		goto err_no_irq;
	}

	tile_irq_activate(pdata->irq, TILE_IRQ_PERCPU);

	/* Configure interrupts. */
	ret = gxio_usb_host_cfg_interrupt(&pdata->usb_ctx,
					  cpu_x(my_cpu), cpu_y(my_cpu),
					  KERNEL_PL, pdata->irq);
	if (ret) {
		ret = -ENXIO;
		goto err_have_irq;
	}

	/* Register all of our memory. */
	pte = pte_set_home(pte, PAGE_HOME_HASH);
	ret = gxio_usb_host_register_client_memory(&pdata->usb_ctx, pte, 0);
	if (ret) {
		ret = -ENXIO;
		goto err_have_irq;
	}

	ohci_hcd_init(hcd_to_ohci(hcd));

	ret = usb_add_hcd(hcd, pdata->irq, IRQF_SHARED);
	if (ret == 0) {
		platform_set_drvdata(pdev, hcd);
		return ret;
	}

	/* Error unwind: release resources in reverse acquisition order. */
err_have_irq:
	destroy_irq(pdata->irq);
err_no_irq:
	tilegx_stop_ohc();
	usb_put_hcd(hcd);
err_hcd:
	gxio_usb_host_destroy(&pdata->usb_ctx);
	return ret;
}
/*
 * Probe the tilegx EHCI host controller: initialize the GXIO USB
 * context, create the hcd, set up the EHCI capability/operational
 * register pointers, allocate and configure the interrupt, register
 * client memory with the hypervisor, and hand the hcd to the USB
 * core.  On failure, unwinds via the err_* labels in reverse
 * acquisition order.  Returns 0 on success or a negative errno.
 */
static int ehci_hcd_tilegx_drv_probe(struct platform_device *pdev)
{
	struct usb_hcd *hcd;
	struct ehci_hcd *ehci;
	struct tilegx_usb_platform_data *pdata = dev_get_platdata(&pdev->dev);
	pte_t pte = { 0 };
	int my_cpu = smp_processor_id();
	int ret;

	if (usb_disabled())
		return -ENODEV;

	/*
	 * Try to initialize our GXIO context; if we can't, the device
	 * doesn't exist.
	 */
	if (gxio_usb_host_init(&pdata->usb_ctx, pdata->dev_index, 1) != 0)
		return -ENXIO;

	hcd = usb_create_hcd(&ehci_tilegx_hc_driver, &pdev->dev,
			     dev_name(&pdev->dev));
	if (!hcd) {
		ret = -ENOMEM;
		goto err_hcd;
	}

	/*
	 * We don't use rsrc_start to map in our registers, but seems like
	 * we ought to set it to something, so we use the register VA.
	 */
	hcd->rsrc_start =
		(ulong) gxio_usb_host_get_reg_start(&pdata->usb_ctx);
	hcd->rsrc_len = gxio_usb_host_get_reg_len(&pdata->usb_ctx);
	hcd->regs = gxio_usb_host_get_reg_start(&pdata->usb_ctx);

	tilegx_start_ehc();

	/* Operational registers follow the capability block. */
	ehci = hcd_to_ehci(hcd);
	ehci->caps = hcd->regs;
	ehci->regs =
		hcd->regs + HC_LENGTH(ehci, readl(&ehci->caps->hc_capbase));
	/* cache this readonly data; minimize chip reads */
	ehci->hcs_params = readl(&ehci->caps->hcs_params);

	/* Create our IRQs and register them. */
	pdata->irq = irq_alloc_hwirq(-1);
	if (!pdata->irq) {
		ret = -ENXIO;
		goto err_no_irq;
	}

	tile_irq_activate(pdata->irq, TILE_IRQ_PERCPU);

	/* Configure interrupts. */
	ret = gxio_usb_host_cfg_interrupt(&pdata->usb_ctx,
					  cpu_x(my_cpu), cpu_y(my_cpu),
					  KERNEL_PL, pdata->irq);
	if (ret) {
		ret = -ENXIO;
		goto err_have_irq;
	}

	/* Register all of our memory. */
	pte = pte_set_home(pte, PAGE_HOME_HASH);
	ret = gxio_usb_host_register_client_memory(&pdata->usb_ctx, pte, 0);
	if (ret) {
		ret = -ENXIO;
		goto err_have_irq;
	}

	ret = usb_add_hcd(hcd, pdata->irq, IRQF_SHARED);
	if (ret == 0) {
		platform_set_drvdata(pdev, hcd);
		device_wakeup_enable(hcd->self.controller);
		return ret;
	}

	/* Error unwind: release resources in reverse acquisition order. */
err_have_irq:
	irq_free_hwirq(pdata->irq);
err_no_irq:
	tilegx_stop_ehc();
	usb_put_hcd(hcd);
err_hcd:
	gxio_usb_host_destroy(&pdata->usb_ctx);
	return ret;
}