void set_pte_order(pte_t *ptep, pte_t pte, int order)
{
	unsigned long pfn = pte_pfn(pte);
	struct page *page = pfn_to_page(pfn);

	/* Update the home of a PTE if necessary */
	pte = pte_set_home(pte, page_home(page));

#ifdef __tilegx__
	*ptep = pte;
#else
	/*
	 * When setting a PTE, write the high bits first, then write
	 * the low bits.  This sets the "present" bit only after the
	 * other bits are in place.  If a particular PTE update
	 * involves transitioning from one valid PTE to another, it
	 * may be necessary to call set_pte_order() more than once,
	 * transitioning via a suitable intermediate state.
	 * Note that this sequence also means that if we are transitioning
	 * from any migrating PTE to a non-migrating one, we will not
	 * see a half-updated PTE with the migrating bit off.
	 */
#if HV_PTE_INDEX_PRESENT >= 32 || HV_PTE_INDEX_MIGRATING >= 32
# error Must write the present and migrating bits last
#endif
	((u32 *)ptep)[1] = (u32)(pte_val(pte) >> 32);
	barrier();
	((u32 *)ptep)[0] = (u32)(pte_val(pte));
#endif
}
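/*
 * Hedged sketch (not from the source above): the same "write the word
 * holding the valid bit last" idea in portable C, for a little-endian
 * 64-bit entry stored as two 32-bit words. The names store_entry64,
 * VALID_BIT, and compiler_barrier() are illustrative assumptions; the
 * kernel's barrier() is a compiler barrier much like this one, and a
 * stronger fence may be needed on other architectures or SMP observers.
 */
#include <stdint.h>

#define VALID_BIT (1u << 0)	/* assumed to live in the low word */
#define compiler_barrier() __asm__ __volatile__("" ::: "memory")

static void store_entry64(volatile uint32_t *ep, uint64_t entry64)
{
	/* High word first: the low word still holds the old valid bit. */
	ep[1] = (uint32_t)(entry64 >> 32);
	compiler_barrier();	/* keep the two stores in this order */
	/* Low word last: the entry becomes valid only now, fully formed. */
	ep[0] = (uint32_t)entry64;
}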
static void homecache_finv_page_home(struct page *page, int home)
{
	if (!PageHighMem(page) && home == page_home(page))
		homecache_finv_page_va(page_address(page), home);
	else
		homecache_finv_map_page(page, home);
}
/*
 * Return a mask of the cpus whose caches currently own these pages.
 * The return value is whether the pages are all coherently cached
 * (i.e. none are immutable, incoherent, or uncached).
 */
static int homecache_mask(struct page *page, int pages,
			  struct cpumask *home_mask)
{
	int i;
	int cached_coherently = 1;
	cpumask_clear(home_mask);
	for (i = 0; i < pages; ++i) {
		int home = page_home(&page[i]);
		if (home == PAGE_HOME_IMMUTABLE ||
		    home == PAGE_HOME_INCOHERENT) {
			cpumask_copy(home_mask, cpu_possible_mask);
			return 0;
		}
#if CHIP_HAS_CBOX_HOME_MAP()
		if (home == PAGE_HOME_HASH) {
			cpumask_or(home_mask, home_mask, &hash_for_home_map);
			continue;
		}
#endif
		if (home == PAGE_HOME_UNCACHED) {
			cached_coherently = 0;
			continue;
		}
		BUG_ON(home < 0 || home >= NR_CPUS);
		cpumask_set_cpu(home, home_mask);
	}
	return cached_coherently;
}
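/*
 * Hedged usage sketch: how a caller might consume homecache_mask().
 * flush_remote_cpus() is a hypothetical stand-in for whatever IPI-based
 * cache-flush primitive the port actually uses; only the cpumask calls
 * are real kernel API.
 */
static void flush_owners(struct page *page, int pages)
{
	struct cpumask home_mask;
	int coherent = homecache_mask(page, pages, &home_mask);

	/*
	 * coherent == 0 means some page was immutable, incoherent, or
	 * uncached; homecache_mask() then widens the mask to
	 * cpu_possible_mask, so a conservative flush is still correct.
	 */
	if (!coherent)
		pr_debug("flushing conservatively: some page not coherent\n");
	if (!cpumask_empty(&home_mask))
		flush_remote_cpus(&home_mask);	/* hypothetical helper */
}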
static void homecache_finv_page_internal(struct page *page, int force_map)
{
	int home = page_home(page);
	if (home == PAGE_HOME_UNCACHED)
		return;
	if (incoherent_home(home)) {
		int cpu;
		for_each_cpu(cpu, &cpu_cacheable_map)
			homecache_finv_map_page(page, cpu);
	} else if (force_map) {
		/* Force if, e.g., the normal mapping is migrating. */
		homecache_finv_map_page(page, home);
	} else {
		homecache_finv_page_home(page, home);
	}
	sim_validate_lines_evicted(PFN_PHYS(page_to_pfn(page)), PAGE_SIZE);
}
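/*
 * Hedged sketch: plausible public entry points over the helper above,
 * assuming the force_map flag is the only thing distinguishing them.
 * Shown for orientation; the real wrappers may differ.
 */
void homecache_finv_page(struct page *page)
{
	homecache_finv_page_internal(page, 0);
}

void homecache_finv_map(struct page *page)
{
	homecache_finv_page_internal(page, 1);
}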
void flush_remote_page(struct page *page, int order)
{
	int i, pages = (1 << order);
	for (i = 0; i < pages; ++i, ++page) {
		void *p = kmap_atomic(page);
		int hfh = 0;
		int home = page_home(page);
#if CHIP_HAS_CBOX_HOME_MAP()
		if (home == PAGE_HOME_HASH)
			hfh = 1;
		else
#endif
			BUG_ON(home < 0 || home >= NR_CPUS);
		finv_buffer_remote(p, PAGE_SIZE, hfh);
		kunmap_atomic(p);
	}
}
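/*
 * Hedged usage sketch: flushing a freshly written order-2 allocation out
 * of every owning cache before another tile consumes it. The surrounding
 * buffer-handoff code is illustrative, not from the source above.
 */
static void handoff_buffer(void)
{
	int order = 2;				/* 1 << 2 = 4 pages */
	struct page *pg = alloc_pages(GFP_KERNEL, order);

	if (!pg)
		return;
	memset(page_address(pg), 0, PAGE_SIZE << order);  /* fill buffer */
	flush_remote_page(pg, order);	/* finv each constituent page */
	/* ... hand the physical range to the consumer here ... */
	__free_pages(pg, order);
}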
/* Set up a single page for DMA access. */
static void __dma_prep_page(struct page *page, unsigned long offset,
			    size_t size, enum dma_data_direction direction)
{
	/*
	 * Flush the page from cache if necessary.
	 * On tilegx, data is delivered to hash-for-home L3; on tilepro,
	 * data is delivered direct to memory.
	 *
	 * NOTE: If we were just doing DMA_TO_DEVICE we could optimize
	 * this to be a "flush" not a "finv" and keep some of the
	 * state in cache across the DMA operation, but it doesn't seem
	 * worth creating the necessary flush_buffer_xxx() infrastructure.
	 */
	int home = page_home(page);
	switch (home) {
	case PAGE_HOME_HASH:
#ifdef __tilegx__
		return;
#endif
		break;
	case PAGE_HOME_UNCACHED:
#ifdef __tilepro__
		return;
#endif
		break;
	case PAGE_HOME_IMMUTABLE:
		/* Should be going to the device only. */
		BUG_ON(direction == DMA_FROM_DEVICE ||
		       direction == DMA_BIDIRECTIONAL);
		return;
	case PAGE_HOME_INCOHERENT:
		/* Incoherent anyway, so no need to work hard here. */
		return;
	default:
		BUG_ON(home < 0 || home >= NR_CPUS);
		break;
	}
	homecache_finv_page(page);

#ifdef DEBUG_ALIGNMENT
	/* Warn if the region isn't cacheline aligned. */
	if (offset & (L2_CACHE_BYTES - 1) || (size & (L2_CACHE_BYTES - 1)))
		pr_warn("Unaligned DMA to non-hfh memory: PA %#llx/%#lx\n",
			PFN_PHYS(page_to_pfn(page)) + offset, size);
#endif
}
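/*
 * Hedged sketch: how a dma_map_page()-style path might apply the helper
 * above across a multi-page region, assuming only that each page can be
 * prepared independently. The loop structure and the __dma_prep_range
 * name are illustrative.
 */
static void __dma_prep_range(struct page *page, unsigned long offset,
			     size_t size, enum dma_data_direction direction)
{
	while (size != 0) {
		/* Clamp this iteration to the remainder of the current page. */
		size_t bytes = min(size, (size_t)(PAGE_SIZE - offset));

		__dma_prep_page(page, offset, bytes, direction);
		size -= bytes;
		offset = 0;
		++page;
	}
}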
void updateLCDscreen()
{
  // millis() returns unsigned long; keeping the timestamp and the
  // difference unsigned avoids overflow after ~24.8 days of uptime.
  static unsigned long last_update = 0;
  unsigned long diff = millis() - last_update;

  if (button_input != NONE || diff > 1000 || forceLCDupdate) {
    last_update = millis();
    if (forceLCDupdate)
      forceLCDupdate = false;

    lcd.home();  // Reset cursor here, since we redraw regardless of page.

    // This switch statement selects which page's content to draw on the LCD.
    switch (page) {
      case HOME:            page_home();           break;
      case LIST_MODE:       page_listMode();       break;
      case PICK_SCHEDULE:   page_pickSchedule();   break;
      case MODIFY_SCHEDULE: page_modifySchedule(); break;
      case ADD_SCHEDULE:    page_addSchedule();    break;
      case MODIFY_TIME:     page_modifyTime();     break;
    }  // switch (page)
  }
}
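/*
 * Hedged sketch (plain C, outside the sketch above): the same
 * throttle-by-tick-difference pattern with a stubbed millis(). Unsigned
 * subtraction keeps the comparison correct even when the tick counter
 * wraps, which is why last_update and diff should not be plain int.
 */
#include <stdio.h>

static unsigned long fake_ms;	/* stand-in tick source for the demo */
static unsigned long millis(void) { return fake_ms; }

static int should_redraw(unsigned long *last, unsigned long interval_ms)
{
	unsigned long now = millis();

	if (now - *last >= interval_ms) {	/* wraparound-safe */
		*last = now;
		return 1;
	}
	return 0;
}

int main(void)
{
	unsigned long last = 0;

	for (fake_ms = 0; fake_ms < 3500; fake_ms += 250)
		if (should_redraw(&last, 1000))
			printf("redraw at %lu ms\n", fake_ms);
	return 0;
}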
/* Make the page ready to be read by the core. */
static void __dma_complete_page(struct page *page, unsigned long offset,
				size_t size, enum dma_data_direction direction)
{
#ifdef __tilegx__
	switch (page_home(page)) {
	case PAGE_HOME_HASH:
		/* I/O device delivered data the way the cpu wanted it. */
		break;
	case PAGE_HOME_INCOHERENT:
		/* Incoherent anyway, so no need to work hard here. */
		break;
	case PAGE_HOME_IMMUTABLE:
		/* Extra read-only copies are not a problem. */
		break;
	default:
		/* Flush the bogus hash-for-home I/O entries to memory. */
		homecache_finv_map_page(page, PAGE_HOME_HASH);
		break;
	}
#endif
}