/*
 * Grow the per-mm LDT so it can hold at least @mincount entries.
 *
 * @pc:       per-mm context whose LDT is being (re)sized
 * @mincount: minimum number of LDT entries required
 * @reload:   non-zero to immediately reload the LDT on the current CPU
 *            (and, on SMP, flush it on every other CPU running this mm)
 *
 * Returns 0 on success (including the no-op case where the LDT is
 * already large enough), -ENOMEM on allocation failure.
 */
static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
{
	void *oldldt, *newldt;
	int oldsize;

	/* Fast path: current LDT already big enough. */
	if (mincount <= pc->size)
		return 0;
	oldsize = pc->size;
	/* Round the request up to a whole number of pages' worth of entries. */
	mincount = (mincount + (PAGE_SIZE / LDT_ENTRY_SIZE - 1)) &
		(~(PAGE_SIZE / LDT_ENTRY_SIZE - 1));
	/* One page comes from the page allocator; anything larger via vmalloc. */
	if (mincount * LDT_ENTRY_SIZE > PAGE_SIZE)
		newldt = vmalloc(mincount * LDT_ENTRY_SIZE);
	else
		newldt = (void *)__get_free_page(GFP_KERNEL);

	if (!newldt)
		return -ENOMEM;

	/* Preserve the existing descriptors, zero the newly added tail. */
	if (oldsize)
		memcpy(newldt, pc->ldt, oldsize * LDT_ENTRY_SIZE);
	oldldt = pc->ldt;
	memset(newldt + oldsize * LDT_ENTRY_SIZE, 0,
	       (mincount - oldsize) * LDT_ENTRY_SIZE);

	/* Let a hypervisor (if any) mark the new LDT pages read-only etc. */
	paravirt_alloc_ldt(newldt, mincount);

#ifdef CONFIG_X86_64
	/* CHECKME: Do we really need this ? */
	wmb();
#endif
	/*
	 * Publish the new table before the new size so a concurrent reader
	 * never sees a size that exceeds the table it observes — presumably
	 * the reason for the barriers; ordering here is deliberate, do not
	 * reorder these stores.
	 */
	pc->ldt = newldt;
	wmb();
	pc->size = mincount;
	wmb();

	if (reload) {
#ifdef CONFIG_SMP
		preempt_disable();
		load_LDT(pc);
		/*
		 * If any other CPU is running with this mm, make it reload
		 * its LDT register too.
		 */
		if (!cpumask_equal(mm_cpumask(current->mm),
				   cpumask_of(smp_processor_id())))
			smp_call_function(flush_ldt, current->mm, 1);
		preempt_enable();
#else
		load_LDT(pc);
#endif
	}
	/* Free the old table with the allocator that produced it. */
	if (oldsize) {
		paravirt_free_ldt(oldldt, oldsize);
		if (oldsize * LDT_ENTRY_SIZE > PAGE_SIZE)
			vfree(oldldt);
		else
			put_page(virt_to_page(oldldt));
	}
	return 0;
}
/*
 * Grow the per-mm LDT so it can hold at least @mincount entries
 * (older variant: kmalloc-backed for single-page tables, no paravirt).
 *
 * @pc:       per-mm context whose LDT is being (re)sized
 * @mincount: minimum number of LDT entries required
 * @reload:   non-zero to reload the LDT on this CPU and flush it on
 *            any other CPU running this mm
 *
 * Returns 0 on success, -ENOMEM on allocation failure.
 */
static int alloc_ldt(mm_context_t *pc, unsigned mincount, int reload)
{
	void *oldldt;
	void *newldt;
	unsigned oldsize;

	if (mincount <= (unsigned)pc->size)
		return 0;
	oldsize = pc->size;
	/*
	 * Round up to a multiple of 512 entries; 512 * LDT_ENTRY_SIZE is
	 * presumably one page on this architecture — the magic 511 mirrors
	 * PAGE_SIZE/LDT_ENTRY_SIZE - 1.
	 */
	mincount = (mincount+511)&(~511);
	if (mincount*LDT_ENTRY_SIZE > PAGE_SIZE)
		newldt = vmalloc(mincount*LDT_ENTRY_SIZE);
	else
		newldt = kmalloc(mincount*LDT_ENTRY_SIZE, GFP_KERNEL);

	if (!newldt)
		return -ENOMEM;

	/* Copy the live descriptors, zero the newly added tail. */
	if (oldsize)
		memcpy(newldt, pc->ldt, oldsize*LDT_ENTRY_SIZE);
	oldldt = pc->ldt;
	memset(newldt+oldsize*LDT_ENTRY_SIZE, 0,
	       (mincount-oldsize)*LDT_ENTRY_SIZE);
	/*
	 * Publish pointer before size, with barriers between the stores —
	 * ordering is deliberate so a racing reader never sees a size
	 * larger than the table it reads.
	 */
	wmb();
	pc->ldt = newldt;
	wmb();
	pc->size = mincount;
	wmb();
	if (reload) {
#ifdef CONFIG_SMP
		cpumask_t mask;
		preempt_disable();
		mask = cpumask_of_cpu(smp_processor_id());
		load_LDT(pc);
		/* Other CPUs running this mm must reload their LDT too. */
		if (!cpus_equal(current->mm->cpu_vm_mask, mask))
			smp_call_function(flush_ldt, 0, 1, 1);
		preempt_enable();
#else
		load_LDT(pc);
#endif
	}
	/* Free the old table with the matching allocator. */
	if (oldsize) {
		if (oldsize*LDT_ENTRY_SIZE > PAGE_SIZE)
			vfree(oldldt);
		else
			kfree(oldldt);
	}
	return 0;
}
static void fix_processor_context(void) { int cpu = smp_processor_id(); struct tss_struct *t = &per_cpu(init_tss, cpu); set_tss_desc(cpu, t); /* * This just modifies memory; should not be * necessary. But... This is necessary, because * 386 hardware has concept of busy TSS or some * similar stupidity. */ load_TR_desc(); /* This does ltr */ load_LDT(¤t->active_mm->context); /* This does lldt */ /* * Now maybe reload the debug registers */ if (current->thread.debugreg7) { set_debugreg(current->thread.debugreg0, 0); set_debugreg(current->thread.debugreg1, 1); set_debugreg(current->thread.debugreg2, 2); set_debugreg(current->thread.debugreg3, 3); /* no 4 and 5 */ set_debugreg(current->thread.debugreg6, 6); set_debugreg(current->thread.debugreg7, 7); } }
/*
 * Restore a checkpointed LDT image into the current mm (page-array
 * variant): size the LDT via __alloc_ldt(), then read the saved
 * descriptor bytes page by page from the checkpoint stream and load it.
 *
 * @li:  checkpoint object header; cpt_size is the LDT image size in bytes
 * @pos: file offset of @li within the checkpoint image
 * @ctx: checkpoint context providing the pread callback
 *
 * Returns 0 on success, or a negative errno from allocation or from
 * ctx->pread.
 */
static int do_rst_ldt(struct cpt_obj_bits *li, loff_t pos, struct cpt_context *ctx)
{
	struct mm_struct *mm = current->mm;
	int i;
	int err;
	int size;

	/* Grow the LDT to hold the checkpointed entry count. */
	err = __alloc_ldt(&mm->context, li->cpt_size/LDT_ENTRY_SIZE);
	if (err)
		return err;

	size = mm->context.size*LDT_ENTRY_SIZE;

	/* Copy the saved image into the (possibly highmem) LDT pages. */
	for (i = 0; i < size; i += PAGE_SIZE) {
		int nr = i / PAGE_SIZE, bytes;
		char *kaddr = kmap(mm->context.ldt_pages[nr]);

		bytes = size - i;
		if (bytes > PAGE_SIZE)
			bytes = PAGE_SIZE;
		err = ctx->pread(kaddr, bytes, ctx, pos + li->cpt_hdrlen + i);
		/* kunmap is paired before any early return — keep it that way. */
		kunmap(mm->context.ldt_pages[nr]);
		if (err)
			return err;
	}

	load_LDT(&mm->context);
	return 0;
}
/*
 * Restore a checkpointed LDT image into the current mm (flat-buffer
 * variant): allocate a new table, read the saved descriptor bytes from
 * the checkpoint stream, swap it in, load it, and free the old table.
 *
 * @li:  checkpoint object header; cpt_size is the LDT image size in bytes
 * @pos: file offset of @li within the checkpoint image
 * @ctx: checkpoint context providing the pread callback
 *
 * Returns 0 on success, -ENOMEM on allocation failure, or a negative
 * errno from ctx->pread.
 *
 * Fix: the original leaked @newldt when ctx->pread() failed — it
 * returned the error without freeing the freshly allocated table.  The
 * error path now releases it with the allocator that produced it.
 */
static int do_rst_ldt(struct cpt_obj_bits *li, loff_t pos, struct cpt_context *ctx)
{
	struct mm_struct *mm = current->mm;
	int oldsize = mm->context.size;
	void *oldldt;
	void *newldt;
	int err;

	/* Same split as alloc_ldt: kmalloc up to a page, vmalloc beyond. */
	if (li->cpt_size > PAGE_SIZE)
		newldt = vmalloc(li->cpt_size);
	else
		newldt = kmalloc(li->cpt_size, GFP_KERNEL);

	if (!newldt)
		return -ENOMEM;

	err = ctx->pread(newldt, li->cpt_size, ctx, pos + li->cpt_hdrlen);
	if (err) {
		/* Don't leak the new table on a failed read. */
		if (li->cpt_size > PAGE_SIZE)
			vfree(newldt);
		else
			kfree(newldt);
		return err;
	}

	oldldt = mm->context.ldt;
	mm->context.ldt = newldt;
	mm->context.size = li->cpt_size/LDT_ENTRY_SIZE;

	load_LDT(&mm->context);

	/* Free the previous table with the matching allocator. */
	if (oldsize) {
		if (oldsize*LDT_ENTRY_SIZE > PAGE_SIZE)
			vfree(oldldt);
		else
			kfree(oldldt);
	}
	return 0;
}
/*
 * Grow the per-mm LDT so it can hold at least @mincount entries
 * (page-array variant: the LDT lives in individually allocated
 * highmem pages tracked in pc->ldt_pages[], so growth never moves
 * existing entries and no copy/publish dance is needed).
 *
 * @pc:       per-mm context whose LDT is being (re)sized
 * @mincount: minimum number of LDT entries required
 * @reload:   non-zero to reload the LDT on this CPU and flush it on
 *            any other CPU running this mm
 *
 * Returns 0 on success, -ENOMEM on allocation failure (pages already
 * allocated are kept; pc->size is not advanced, so a later retry
 * reuses them).
 */
static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
{
	int oldsize, newsize, i;

	if (mincount <= pc->size)
		return 0;
	/*
	 * LDT got larger - reallocate if necessary.
	 */
	oldsize = pc->size;
	/* Round up to a multiple of 512 entries (one page's worth). */
	mincount = (mincount+511)&(~511);
	newsize = mincount*LDT_ENTRY_SIZE;
	for (i = 0; i < newsize; i += PAGE_SIZE) {
		int nr = i/PAGE_SIZE;
		/* 64 KB is the architectural maximum LDT size. */
		BUG_ON(i >= 64*1024);
		if (!pc->ldt_pages[nr]) {
			pc->ldt_pages[nr] = alloc_page(GFP_HIGHUSER);
			if (!pc->ldt_pages[nr])
				return -ENOMEM;
			/* New descriptors must start out as null entries. */
			clear_highpage(pc->ldt_pages[nr]);
		}
	}
	pc->size = mincount;
	if (reload) {
#ifdef CONFIG_SMP
		local_irq_disable();
#endif
		load_LDT(pc);
#ifdef CONFIG_SMP
		local_irq_enable();
		/*
		 * cpu_vm_mask is a plain bitmask here (pre-cpumask_t era);
		 * if any other CPU runs this mm, make it reload its LDT.
		 */
		if (current->mm->cpu_vm_mask != (1<<smp_processor_id()))
			smp_call_function(flush_ldt, 0, 1, 1);
#endif
	}
	return 0;
}
/* * linux/arch/x86_64/kernel/ldt.c * * Copyright (C) 1992 Krishna Balasubramanian and Linus Torvalds * Copyright (C) 1999 Ingo Molnar <*****@*****.**> * Copyright (C) 2002 Andi Kleen * * This handles calls from both 32bit and 64bit mode. */ #include <linux/errno.h> #include <linux/sched.h> #include <linux/string.h> #include <linux/mm.h> #include <linux/smp.h> #include <linux/smp_lock.h> #include <linux/vmalloc.h> #include <linux/slab.h> #include <asm/uaccess.h> #include <asm/system.h> #include <asm/ldt.h> #include <asm/desc.h> #include <asm/proto.h> #include <asm/pgalloc.h> #ifdef CONFIG_SMP /* avoids "defined but not used" warnig */ static void flush_ldt(void *null) { if (current->active_mm) load_LDT(¤t->active_mm->context); }