/* Unmap the page at 'virt' from the current address space.
 * Clears the page-table entry, invalidates the TLB (locally, and on
 * other CPUs via IPI when the translation may be cached there) and
 * releases the backing physical frame unless it is marked
 * copy-on-write.  'locked' is nonzero when the caller already holds
 * the page-directory lock.  Always returns 0. */
int vm_do_unmap(addr_t virt, unsigned locked)
{
	addr_t page = virt & PAGE_MASK;
#if CONFIG_SWAP
	/* If this task has pages on swap, bring this one in before we
	 * tear the mapping down. */
	if(current_task && num_swapdev && current_task->num_swapped)
		swap_in_page((task_t *)current_task, page);
#endif
	int need_lock = kernel_task && page != PDIR_DATA && !locked;
	if(need_lock)
		mutex_acquire(&pd_cur_data->lock);
	/* Snapshot the old entry, then clear it and flush the local TLB. */
	addr_t old_entry = page_tables[page / 0x1000];
	page_tables[page / 0x1000] = 0;
	asm("invlpg (%0)"::"r" (virt));
#if CONFIG_SMP
	/* Remote shootdown: kernel memory is visible everywhere; thread-
	 * shared memory matters only if another thread uses this space. */
	if(kernel_task && page != PDIR_DATA) {
		if(IS_KERN_MEM(virt)
				|| (IS_THREAD_SHARED_MEM(virt) && pd_cur_data->count > 1))
			send_ipi(LAPIC_ICR_SHORT_OTHERS, 0,
					LAPIC_ICR_LEVELASSERT | LAPIC_ICR_TM_LEVEL | IPI_TLB);
	}
#endif
	if(need_lock)
		mutex_release(&pd_cur_data->lock);
	/* COW frames are still referenced elsewhere; don't free those. */
	if(old_entry && !(old_entry & PAGE_COW))
		pm_free_page(old_entry & PAGE_MASK);
	return 0;
}
/* Boot one application processor (Intel MP spec startup sequence):
 * copy the real-mode trampoline below 1MB, arm the BIOS warm-reset
 * vector, then send INIT and (for integrated APICs) STARTUP IPIs.
 * NOTE(review): the function is truncated in this view — the code
 * that waits for the AP and produces the return value is not visible,
 * which is also why 'success' and 'to' appear unused here. */
static int boot_cpu(imps_processor *proc)
{
	int apicid = proc->apic_id, success = 1, to;
	unsigned bootaddr, accept_status;
	unsigned bios_reset_vector = PHYS_TO_VIRTUAL(BIOS_RESET_VECTOR);

	/*
	 * Copy boot code for secondary CPUs here.  Find it in between
	 * "patch_code_start" and "patch_code_end" symbols.  The other CPUs
	 * will start there in 16-bit real mode under the 1MB boundary.
	 * "patch_code_start" should be placed at a 4K-aligned address
	 * under the 1MB boundary.
	 */
	extern char patch_code_start[];
	extern char patch_code_end[];
	bootaddr = (512-64)*1024;  /* (512-64)K = 448K: 4K-aligned, below 1MB */
	memcpy((char *)bootaddr, patch_code_start, patch_code_end - patch_code_start);

	/*
	 * Generic CPU startup sequence starts here.
	 */

	/* set BIOS reset vector so the AP warm-boots into the trampoline */
	CMOS_WRITE_BYTE(CMOS_RESET_CODE, CMOS_RESET_JUMP);
	*((volatile unsigned *) bios_reset_vector) = ((bootaddr & 0xFF000) << 12);

	/* clear the APIC error register (the read latches the cleared state) */
	IMPS_LAPIC_WRITE(LAPIC_ESR, 0);
	accept_status = IMPS_LAPIC_READ(LAPIC_ESR);

	/* assert INIT IPI */
	send_ipi(apicid, LAPIC_ICR_TM_LEVEL | LAPIC_ICR_LEVELASSERT | LAPIC_ICR_DM_INIT);
	UDELAY(10000);

	/* de-assert INIT IPI */
	send_ipi(apicid, LAPIC_ICR_TM_LEVEL | LAPIC_ICR_DM_INIT);
	UDELAY(10000);

	/*
	 * Send Startup IPIs if not an old pre-integrated APIC.
	 */
	if (proc->apic_ver >= APIC_VER_NEW) {
		int i;
		for (i = 1; i <= 2; i++) {
			/* SIPI vector field = trampoline page number (bootaddr >> 12) */
			send_ipi(apicid, LAPIC_ICR_DM_SIPI | ((bootaddr >> 12) & 0xFF));
			UDELAY(1000);
		}
	}
/* Unmap the page at 'virt' in the current address space (x86_64,
 * 4-level paging).  Walks PML4 -> PDPT -> PD -> PT, clears the PTE,
 * flushes the TLB (locally, and via IPI where the mapping may be
 * cached on other CPUs) and frees the physical frame unless it is
 * marked copy-on-write.  'locked' is nonzero if the caller already
 * holds the page-directory lock.  Always returns 0.
 *
 * Fix: intermediate paging structures created during the walk are now
 * allocated with pm_alloc_page_zero(), matching vm_map().  Handing
 * the MMU a non-zeroed frame as a page table leaves garbage entries
 * that can look "present". */
int vm_do_unmap(addr_t virt, unsigned locked)
{
#if CONFIG_SWAP
	/* Bring the page in from swap before tearing down the mapping. */
	if(current_task && num_swapdev && current_task->num_swapped)
		swap_in_page((task_t *)current_task, virt & PAGE_MASK);
#endif
	addr_t vpage = (virt&PAGE_MASK)/0x1000;
	unsigned vp4 = PML4_IDX(vpage);
	unsigned vpdpt = PDPT_IDX(vpage);
	unsigned vdir = PAGE_DIR_IDX(vpage);
	unsigned vtbl = PAGE_TABLE_IDX(vpage);
	if(kernel_task && (virt&PAGE_MASK) != PDIR_DATA && !locked)
		mutex_acquire(&pd_cur_data->lock);
	page_dir_t *pd;
	page_table_t *pt;
	pdpt_t *pdpt;
	pml4_t *pml4;
	pml4 = (pml4_t *)((kernel_task && current_task) ? current_task->pd : kernel_dir);
	/* NOTE(review): an unmap arguably should not need to create missing
	 * levels at all; the allocate-on-walk is kept for compatibility with
	 * the existing structure, but now zeroed. */
	if(!pml4[vp4])
		pml4[vp4] = pm_alloc_page_zero() | PAGE_PRESENT | PAGE_WRITE;
	pdpt = (addr_t *)((pml4[vp4]&PAGE_MASK) + PHYS_PAGE_MAP);
	if(!pdpt[vpdpt])
		pdpt[vpdpt] = pm_alloc_page_zero() | PAGE_PRESENT | PAGE_WRITE;
	pd = (addr_t *)((pdpt[vpdpt]&PAGE_MASK) + PHYS_PAGE_MAP);
	if(!pd[vdir])
		pd[vdir] = pm_alloc_page_zero() | PAGE_PRESENT | PAGE_WRITE;
	pt = (addr_t *)((pd[vdir]&PAGE_MASK) + PHYS_PAGE_MAP);
	/* Snapshot, clear, and flush the local TLB entry. */
	addr_t p = pt[vtbl];
	pt[vtbl] = 0;
	asm("invlpg (%0)"::"r" (virt));
#if CONFIG_SMP
	/* Remote shootdown when other CPUs may cache this translation. */
	if(kernel_task && (virt&PAGE_MASK) != PDIR_DATA)
	{
		if(IS_KERN_MEM(virt))
			send_ipi(LAPIC_ICR_SHORT_OTHERS, 0,
					LAPIC_ICR_LEVELASSERT | LAPIC_ICR_TM_LEVEL | IPI_TLB);
		else if((IS_THREAD_SHARED_MEM(virt) && pd_cur_data->count > 1))
			send_ipi(LAPIC_ICR_SHORT_OTHERS, 0,
					LAPIC_ICR_LEVELASSERT | LAPIC_ICR_TM_LEVEL | IPI_TLB);
	}
#endif
	if(kernel_task && (virt&PAGE_MASK) != PDIR_DATA && !locked)
		mutex_release(&pd_cur_data->lock);
	/* COW frames are still referenced elsewhere; don't free those. */
	if(p && !(p & PAGE_COW))
		pm_free_page(p & PAGE_MASK);
	return 0;
}
/* Bring secondary core 'cpu' online (Zynq).
 * Marks the core in the boot map, flushes caches so the waking core
 * sees the updated map, then kicks it with an IPI.  Always returns 0.
 *
 * Fix: shift an unsigned constant — '1 << cpu' on a signed int is
 * undefined behavior once cpu reaches 31. */
int wakeup_cpu(unsigned int cpu)
{
	zynq_core_map |= 1U << cpu;
	/* Publish the updated core map before the IPI arrives. */
	cpu_flush_cache_all();
	send_ipi(cpumask_of_cpu(cpu), 1);
	return 0;
}
/* Map physical frame 'phys' at virtual address 'virt' with page
 * attributes 'attr' (x86_64, 4-level paging).  Missing intermediate
 * paging structures are allocated (zeroed) on demand.  'opt' flags:
 *   MAP_PDLOCKED - caller already holds the page-directory lock
 *   MAP_NOCLEAR  - do not zero the newly mapped page
 * Always returns 0. */
int vm_map(addr_t virt, addr_t phys, unsigned attr, unsigned opt)
{
	addr_t pageno = (virt & PAGE_MASK) / 0x1000;
	unsigned i4 = PML4_IDX(pageno);
	unsigned i3 = PDPT_IDX(pageno);
	unsigned i2 = PAGE_DIR_IDX(pageno);
	unsigned i1 = PAGE_TABLE_IDX(pageno);
	int take_lock = kernel_task && !(opt & MAP_PDLOCKED);
	if(take_lock)
		mutex_acquire(&pd_cur_data->lock);
	/* Walk (and, where absent, build) PML4 -> PDPT -> PD -> PT. */
	pml4_t *l4 = (pml4_t *)((kernel_task && current_task) ? current_task->pd : kernel_dir);
	if(!l4[i4])
		l4[i4] = pm_alloc_page_zero() | PAGE_PRESENT | PAGE_WRITE | (attr & PAGE_USER);
	pdpt_t *l3 = (addr_t *)((l4[i4] & PAGE_MASK) + PHYS_PAGE_MAP);
	if(!l3[i3])
		l3[i3] = pm_alloc_page_zero() | PAGE_PRESENT | PAGE_WRITE | (attr & PAGE_USER);
	page_dir_t *l2 = (addr_t *)((l3[i3] & PAGE_MASK) + PHYS_PAGE_MAP);
	if(!l2[i2])
		l2[i2] = pm_alloc_page_zero() | PAGE_PRESENT | PAGE_WRITE | (attr & PAGE_USER);
	page_table_t *l1 = (addr_t *)((l2[i2] & PAGE_MASK) + PHYS_PAGE_MAP);
	/* Install the final translation and flush the local TLB entry. */
	l1[i1] = (phys & PAGE_MASK) | attr;
	asm("invlpg (%0)"::"r" (virt));
	if(!(opt & MAP_NOCLEAR))
		memset((void *)(virt & PAGE_MASK), 0, 0x1000);
#if CONFIG_SMP
	/* Shoot down stale translations on other CPUs when the address
	 * is kernel memory or shared with other threads of this space. */
	if(kernel_task) {
		if(IS_KERN_MEM(virt)
				|| (IS_THREAD_SHARED_MEM(virt) && pd_cur_data->count > 1))
			send_ipi(LAPIC_ICR_SHORT_OTHERS, 0,
					LAPIC_ICR_LEVELASSERT | LAPIC_ICR_TM_LEVEL | IPI_TLB);
	}
#endif
	if(take_lock)
		mutex_release(&pd_cur_data->lock);
	return 0;
}
/* Remove the mapping for 'virt' WITHOUT freeing the backing frame.
 * Clears the page-table entry and performs the local (and, when the
 * mapping may be cached elsewhere, remote) TLB invalidation.
 * 'locked' is nonzero when the caller already holds the
 * page-directory lock.  Always returns 0. */
int vm_do_unmap_only(addr_t virt, unsigned locked)
{
	addr_t page = virt & PAGE_MASK;
#if CONFIG_SWAP
	/* If this task has pages on swap, bring this one in first. */
	if(current_task && num_swapdev && current_task->num_swapped)
		swap_in_page((task_t *)current_task, page);
#endif
	int need_lock = kernel_task && page != PDIR_DATA && !locked;
	if(need_lock)
		mutex_acquire(&pd_cur_data->lock);
	page_tables[page / 0x1000] = 0;
	asm("invlpg (%0)"::"r" (virt));
#if CONFIG_SMP
	/* Remote shootdown for kernel or actively-shared memory. */
	if(kernel_task && page != PDIR_DATA) {
		if(IS_KERN_MEM(virt)
				|| (IS_THREAD_SHARED_MEM(virt) && pd_cur_data->count > 1))
			send_ipi(LAPIC_ICR_SHORT_OTHERS, 0,
					LAPIC_ICR_LEVELASSERT | LAPIC_ICR_TM_LEVEL | IPI_TLB);
	}
#endif
	if(need_lock)
		mutex_release(&pd_cur_data->lock);
	return 0;
}
/* TLB flushing * * Flush needs to be done on the local CPU and on any other CPU that * may have the same mapping. The mm->cpu_vm_mask is used to keep track * of which CPUs that a specific process has been executed on. */ void flush_tlb_common(struct mm_struct* mm, struct vm_area_struct* vma, unsigned long addr) { unsigned long flags; cpumask_t cpu_mask; spin_lock_irqsave(&tlbstate_lock, flags); cpu_mask = (mm == FLUSH_ALL ? CPU_MASK_ALL : mm->cpu_vm_mask); cpu_clear(smp_processor_id(), cpu_mask); flush_mm = mm; flush_vma = vma; flush_addr = addr; send_ipi(IPI_FLUSH_TLB, 1, cpu_mask); spin_unlock_irqrestore(&tlbstate_lock, flags); }
/* Send a fixed-delivery IPI to the core identified by 'id'.
 * Builds both halves of the LAPIC interrupt command register by hand
 * and always returns 1. */
intptr_t lrt_pic_ipi(uintptr_t id)
{
	lapic_icr_low low;
	lapic_icr_high high;

	low.raw = 0;
	low.vector = lrt_pic_getIPIvec();   /* arbitrarily chosen IPI vector */
	low.level = 1;                      /* required for a fixed IPI */
	low.destination_shorthand = 0;      /* explicit destination, no shorthand */

	high.raw = 0;
	high.destination = id;

	send_ipi(low, high);
	return 1;
}
/* Bring one secondary CPU online.
 * Forks an idle task for the CPU, publishes the boot parameters
 * through smp_init_current_idle_thread/cpu_now_booting, sends the
 * boot IPI, then polls up to ~1 second for the CPU to mark itself
 * online.  Returns 0 on success, -1 if the CPU never shows up. */
static int __init smp_boot_one_cpu(int cpuid)
{
	struct task_struct *idle;
	cpumask_t target = CPU_MASK_NONE;
	unsigned tries;

	idle = fork_idle(cpuid);
	if (IS_ERR(idle))
		panic("SMP: fork failed for CPU:%d", cpuid);
	task_thread_info(idle)->cpu = cpuid;

	/* Information for the CPU that is about to boot. */
	smp_init_current_idle_thread = task_thread_info(idle);
	cpu_now_booting = cpuid;

	/* Kick it.  The CPU is set online around the IPI and cleared
	 * again until it really comes up. */
	cpu_set(cpuid, cpu_online_map);
	cpu_set(cpuid, target);
	send_ipi(IPI_BOOT, 0, target);
	cpu_clear(cpuid, cpu_online_map);

	/* Wait for the CPU to come online (10000 * 100us = 1s). */
	for (tries = 0; tries < 10000; tries++) {
		if (cpu_online(cpuid)) {
			cpu_now_booting = 0;
			smp_init_current_idle_thread = NULL;
			return 0; /* CPU online */
		}
		udelay(100);
		barrier();
	}

	put_task_struct(idle);
	idle = NULL;
	printk(KERN_CRIT "SMP: CPU:%d is stuck.\n", cpuid);
	return -1;
}
int vmm_poke_guest(struct proc *p, int guest_pcoreid) { struct guest_pcore *gpc; int pcoreid; gpc = lookup_guest_pcore(p, guest_pcoreid); if (!gpc) { set_error(ENOENT, "Bad guest_pcoreid %d", guest_pcoreid); return -1; } /* We're doing an unlocked peek; it could change immediately. This is a * best effort service. */ pcoreid = ACCESS_ONCE(gpc->cpu); if (pcoreid == -1) { /* So we know that we'll miss the poke for the posted IRQ. We could * return an error. However, error handling for this case isn't * particularly helpful (yet). The absence of the error does not mean * the IRQ was posted. We'll still return 0, meaning "the user didn't * mess up; we tried." */ return 0; } send_ipi(pcoreid, I_POKE_CORE); return 0; }
/* Boot one application processor (Phantom OS variant of the MP-spec
 * startup sequence): allocate an AP stack, install the real-mode
 * trampoline, arm the BIOS warm-reset vector via the 0x467/0x469
 * pointer, then send INIT/SIPI IPIs.
 * NOTE(review): truncated in this view — the wait-for-AP tail is not
 * visible, which is why 'success' and 'to' appear unused.  'bootaddr'
 * is used but not declared here; presumably a file-scope global —
 * verify. */
static int boot_cpu(imps_processor *proc)
{
	int apicid = proc->apic_id, success = 1, to;
	unsigned accept_status;
	unsigned bios_reset_vector = (int)PHYS_TO_VIRTUAL(BIOS_RESET_VECTOR);

	int ver = IMPS_LAPIC_READ(LAPIC_VER);
	SHOW_FLOW( 0, "APIC ver = 0x%x (%d)", ver, APIC_VERSION(ver) );

	// TODO define size? guard page?
	ap_stack = calloc(1, 64*1024);

	/*
	 * Copy boot code for secondary CPUs here. Find it in between
	 * "patch_code_start" and "patch_code_end" symbols. The other CPUs
	 * will start there in 16-bit real mode under the 1MB boundary.
	 * "patch_code_start" should be placed at a 4K-aligned address
	 * under the 1MB boundary.
	 */

	//return 0;
	//panic("boot SMP cpu code is not ready");

	//extern char patch_code_start[];
	//extern char patch_code_end[];
	//bootaddr = (512-64)*1024;
	//memcpy((char *)bootaddr, patch_code_start, patch_code_end - patch_code_start);
	install_ap_tramp((void *)bootaddr);

	/* Cleared here; presumably set by the AP once it is up — confirm. */
	smp_ap_booted = 0;

	//dump_mp_gdt((void *)&MP_GDT);

	/*
	 * Generic CPU startup sequence starts here.
	 */

	/* set BIOS reset vector */
	CMOS_WRITE_BYTE(CMOS_RESET_CODE, CMOS_RESET_JUMP);
	//*((volatile unsigned *) bios_reset_vector) = ((bootaddr & 0xFF000) << 12);
	//*((volatile unsigned *) bios_reset_vector) = ((bootaddr & 0xFF000) << 12);
	/* warm-reset vector in the BDA: segment at 0x469, offset at 0x467 */
	*((volatile unsigned short *) 0x469) = bootaddr >> 4;
	*((volatile unsigned short *) 0x467) = bootaddr & 0xf;

	/* clear the APIC error register (the read latches the cleared state) */
	IMPS_LAPIC_WRITE(LAPIC_ESR, 0);
	accept_status = IMPS_LAPIC_READ(LAPIC_ESR);

	/* assert INIT IPI */
	send_ipi(apicid, LAPIC_ICR_TM_LEVEL | LAPIC_ICR_LEVELASSERT | LAPIC_ICR_DM_INIT);
	//UDELAY(10000);
	phantom_spinwait(10);

	/* de-assert INIT IPI */
	send_ipi(apicid, LAPIC_ICR_TM_LEVEL | LAPIC_ICR_DM_INIT);
	phantom_spinwait(10);
	//UDELAY(10000);

#if 1
	/*
	 * Send Startup IPIs if not an old pre-integrated APIC.
	 */
	if (proc->apic_ver >= APIC_VER_NEW) {
		int i;
		for (i = 1; i <= 2; i++) {
			/* SIPI vector field = trampoline page number (bootaddr >> 12) */
			send_ipi(apicid, LAPIC_ICR_DM_SIPI | ((bootaddr >> 12) & 0xFF));
			//UDELAY(1000);
			phantom_spinwait(1);
		}
	}
/* Deliver a timer-tick IPI to every CPU in 'mask'. */
static void broadcast(const struct cpumask *mask)
{
	send_ipi(mask, IPI_TIMER);
}
/* Load an application object file and hand it off to the aCoral core
 * running on CPU1.
 *
 * Reads appInfo.filename into a buddy-allocated block, copies the
 * parameter struct into shared memory, reserves an appMemInfo slot,
 * signals CPU1 with an IPI and waits for completion, then copies the
 * return value back to user space.
 *
 * Returns 0 on success, -1 on setup failure, -EFAULT if the result
 * cannot be copied to user space.
 *
 * Fix: error paths now release everything that was acquired (file
 * handle, address-limit override, appMemInfo slot, buddy blocks)
 * via goto-based cleanup instead of leaking them; a copy_to_user
 * failure no longer leaks the three buddy blocks. */
int acoral_link_app(App_Info appInfo)
{
	struct file *fp;            /* file handle for the app object */
	mm_segment_t old_fs;        /* saved address-limit */
	loff_t pos = 0;
	void __iomem *add;          /* load address for the object file */
	unsigned int file_len = 0;  /* file length in bytes */
	int index;                  /* reserved appMemInfo slot */
	int rc = -1;
	struct timeval start, stop;
	WORD_b *app_blk = 0;        /* holds the app object */
	WORD_b *para_blk = 0;       /* holds the parameter struct */
	WORD_b *ret_blk = 0;        /* holds the return value */

	do_gettimeofday(&start);

	fp = filp_open(appInfo.filename, O_RDONLY, 0644);
	if (IS_ERR(fp)) {
		printk("create file error\n");
		return -1;
	}
	old_fs = get_fs();   /* save the current address-limit */
	set_fs(KERNEL_DS);   /* allow VFS calls on kernel buffers */
	file_len = vfs_llseek(fp, 0, SEEK_END); /* get file length */

	/* Reserve a free appMemInfo slot.
	 * NOTE(review): this spins with IRQs off until a slot frees up,
	 * exactly as in the original — confirm a slot can actually be
	 * released while we busy-wait here. */
	local_irq_disable();
	while ((index = find_and_set_bit_appMemInfo(appMemInfo_bitmap)) < 0)
		;
	local_irq_enable();
	printk("index: %d\n", index);

	app_blk = AllocBuddy(AllList, file_len);
	if (app_blk == 0) {
		printk("alloc error\n");
		goto out_release;
	}
	para_blk = AllocBuddy(AllList, appInfo.parasize);
	if (para_blk == 0) {
		printk("para_blk error\n");
		goto out_release;
	}
	ret_blk = AllocBuddy(AllList, appInfo.retsize);
	if (ret_blk == 0) {
		printk("ret_blk error\n");
		goto out_release;
	}

	/* NOTE(review): appInfo.para comes from user space; the direct
	 * memcpy relies on the KERNEL_DS override above.  copy_from_user()
	 * would be the robust form — kept as-is to preserve behavior. */
	memcpy(para_blk->addr, appInfo.para, appInfo.parasize);
	/* Translate kernel virtual addresses into the address space the
	 * other core uses. */
	appMemInfo[index].addr = app_blk->addr - first_blk_phy + ALLOC_MEM_START;
	appMemInfo[index].para = para_blk->addr - first_blk_phy + ALLOC_MEM_START;
	appMemInfo[index].ret = ret_blk->addr - first_blk_phy + ALLOC_MEM_START;
	appMemInfo[index].com_index = index; /* which completion to signal */
	appMemInfo[index].prio = appInfo.prio;
	printk("prio:%d\n", appMemInfo[index].prio);

	/* Load the object file into its block. */
	add = (void __iomem *)(app_blk->addr);
	if (vfs_read(fp, add, file_len, &pos) != file_len) {
		printk("vfs_read err\n");
		goto out_release;
	}
	filp_close(fp, NULL);
	fp = NULL;
	set_fs(old_fs);  /* restore the saved address-limit */

	do_gettimeofday(&stop);
	timeval_subtract("para_time",&start,&stop);

	/* Kick CPU1 and wait for it to finish running the app. */
	send_ipi(ACORAL_IPI_INT3_CPU1_APP);
	wait_for_completion(&acoral_com[index]);

	do_gettimeofday(&start);
	memset(&appMemInfo[index], 0, sizeof(App_Mem_Info));
	/* NOTE(review): as in the original, the slot's bitmap bit is NOT
	 * released on success (the free_bit_appMemInfo call was commented
	 * out) — confirm who releases it, otherwise slots leak. */
	rc = 0;
	if (copy_to_user(appInfo.ret, ret_blk->addr, appInfo.retsize))
		rc = -EFAULT;
	FreeBuddy(AllList, app_blk);
	FreeBuddy(AllList, para_blk);
	FreeBuddy(AllList, ret_blk);
	do_gettimeofday(&stop);
	timeval_subtract("trans_ret_time",&start,&stop);
	return rc;

out_release:
	/* Error path: undo everything acquired so far. */
	if (ret_blk)
		FreeBuddy(AllList, ret_blk);
	if (para_blk)
		FreeBuddy(AllList, para_blk);
	if (app_blk)
		FreeBuddy(AllList, app_blk);
	free_bit_appMemInfo(index, appMemInfo_bitmap);
	set_fs(old_fs);
	if (fp)
		filp_close(fp, NULL);
	return rc;
}
/* Ask CPU 'cpu' to reschedule by sending it a schedule IPI. */
void smp_send_reschedule(int cpu)
{
	cpumask_t target = CPU_MASK_NONE;

	cpu_set(cpu, target);
	send_ipi(IPI_SCHEDULE, 0, target);
}