int main (int argc, char **argv) { const char *space; int i, num_tabs; size_t len; printf ("#ifndef _ASM_IA64_OFFSETS_H\n"); printf ("#define _ASM_IA64_OFFSETS_H\n\n"); printf ("/*\n * DO NOT MODIFY\n *\n * This file was generated by " "arch/ia64/tools/print_offsets.\n *\n */\n\n"); /* This is stretching things a bit, but entry.S needs the bit number for PT_PTRACED and it can't include <linux/sched.h> so this seems like a reasonably solution. At least the code won't break in subtle ways should PT_PTRACED ever change. Ditto for PT_TRACESYS_BIT. */ printf ("#define PT_PTRACED_BIT\t\t\t%u\n", ffs (PT_PTRACED) - 1); printf ("#define PT_TRACESYS_BIT\t\t\t%u\n", ffs (PT_TRACESYS) - 1); printf ("#define PT_AUDITED_BIT\t\t\t%u\n", ffs (PT_AUDITED) - 1); printf ("#define PT_TRACEAUDITMASK\t\t0x%x\n\n", PT_TRACESYS|PT_AUDITED|PT_SINGLESTEP); for (i = 0; i < sizeof (tab) / sizeof (tab[0]); ++i) { if (tab[i].name[0] == '\0') printf ("\n"); else { len = strlen (tab[i].name); num_tabs = (40 - len) / 8; if (num_tabs <= 0) space = " "; else space = strchr(tabs, '\0') - (40 - len) / 8; printf ("#define %s%s%lu\t/* 0x%lx */\n", tab[i].name, space, tab[i].value, tab[i].value); } } printf ("\n#define CLONE_IDLETASK_BIT %ld\n", ia64_fls (CLONE_IDLETASK)); printf ("\n#define CLONE_SETTLS_BIT %ld\n", ia64_fls (CLONE_SETTLS)); printf ("\n#endif /* _ASM_IA64_OFFSETS_H */\n"); return 0; }
/*
 * Flush the TLB entries covering the virtual range [start, end) of the
 * address space that VMA belongs to.
 *
 * If the mm is not the one currently active on this CPU, fall back to a
 * coarse flush (global flush on SMP, context invalidation on UP) rather
 * than purging individual pages.  Otherwise, pick the smallest supported
 * purge page-size that covers the range and issue ptc.l (UP) or a
 * platform global purge (SMP) over it.
 */
void
flush_tlb_range (struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long size = end - start;
	unsigned long nbits;

	if (mm != current->active_mm) {
		/* this does happen, but perhaps it's not worth optimizing for? */
#ifdef CONFIG_SMP
		flush_tlb_all();
#else
		/* invalidate the whole context; a fresh one is allocated on next use */
		mm->context = 0;
#endif
		return;
	}

	/* log2 of the range size, rounded up to at least one 4KB page */
	nbits = ia64_fls(size + 0xfff);
	/* bump nbits up to the next purge page-size the CPU supports
	   (purge.mask has one bit set per supported size) */
	while (unlikely (((1UL << nbits) & purge.mask) == 0) && (nbits < purge.max_bits))
		++nbits;
	if (nbits > purge.max_bits)
		nbits = purge.max_bits;
	/* align the start address to the chosen purge granule */
	start &= ~((1UL << nbits) - 1);

# ifdef CONFIG_SMP
	platform_global_tlb_purge(start, end, nbits);
# else
	/* purge the range one granule at a time; ptc.l encodes the
	   page-size exponent in bits [7:2] of its second operand */
	do {
		ia64_ptcl(start, (nbits<<2));
		start += (1UL << nbits);
	} while (start < end);
# endif
	ia64_srlz_i();			/* srlz.i implies srlz.d */
}
/*
 * Encapsulate access to the itm structure for SMP.
 *
 * Program this CPU's interval timer (ITM/ITV) so the cycle counter
 * delivers periodic timer interrupts, offsetting each CPU's first tick
 * so the ticks of different CPUs are spread across the tick interval.
 */
void
ia64_cpu_local_tick (void)
{
	unsigned long tick_delta, stagger;
	int me = smp_processor_id();

	/* arrange for the cycle counter to generate a timer interrupt: */
	ia64_set_itv(IA64_TIMER_VECTOR);

	tick_delta = local_cpu_data->itm_delta;

	/*
	 * Offset this CPU's tick within the interval so that the CPUs
	 * don't all fire at (almost) the same moment.  CPU 0 keeps a
	 * zero offset.
	 */
	stagger = 0;
	if (me != 0) {
		unsigned long top = 1UL << ia64_fls(me);

		stagger = (2*(me - top) + 1) * tick_delta/top/2;
	}

	local_cpu_data->itm_next = ia64_get_itc() + tick_delta + stagger;
	ia64_set_itm(local_cpu_data->itm_next);
}
/*
 * Boot-time TLB initialization for this CPU: discover the purge
 * page-sizes the hardware supports (via PAL), cache the ptc.e loop
 * parameters in the per-CPU data, and flush any TLB entries left over
 * from bootstrapping.
 */
void __init
ia64_tlb_init (void)
{
	ia64_ptce_info_t ptce_info;
	unsigned long tr_pgbits;
	long status;

	if ((status = ia64_pal_vm_page_size(&tr_pgbits, &purge.mask)) != 0) {
		/* note the trailing space before the continued string:
		   without it the message ran "status=%ld;defaulting..." */
		printk(KERN_ERR "PAL_VM_PAGE_SIZE failed with status=%ld; "
		       "defaulting to architected purge page-sizes.\n", status);
		/* architected minimum set of supported purge page-sizes */
		purge.mask = 0x115557000UL;
	}
	purge.max_bits = ia64_fls(purge.mask);

	/* cache the ptc.e base/count/stride parameters for local_flush_tlb_all() */
	ia64_get_ptce(&ptce_info);
	local_cpu_data->ptce_base = ptce_info.base;
	local_cpu_data->ptce_count[0] = ptce_info.count[0];
	local_cpu_data->ptce_count[1] = ptce_info.count[1];
	local_cpu_data->ptce_stride[0] = ptce_info.stride[0];
	local_cpu_data->ptce_stride[1] = ptce_info.stride[1];

	local_flush_tlb_all();	/* nuke left overs from bootstrapping... */
}