/*
 * amd64_set_ldt: install user-supplied LDT descriptors for process p.
 *
 * "args" points to a struct amd64_set_ldt_args in user space giving the
 * first slot (start), the count (num) and the user array of descriptors
 * (desc).  The descriptors are copied in once, validated (only ring-3,
 * non-system descriptors may be marked present), the per-pmap user LDT
 * is created or grown as needed, and the slots are overwritten.  On
 * success *retval is the first slot written.
 *
 * Returns 0, or EINVAL (bad range), ENOMEM, EACCES (forbidden
 * descriptor), or a copyin error.
 */
int
amd64_set_ldt(struct proc *p, void *args, register_t *retval)
{
	int error, i, n;
	struct pcb *pcb = &p->p_addr->u_pcb;
	pmap_t pmap = p->p_vmspace->vm_map.pmap;
	struct amd64_set_ldt_args ua;
	union descriptor *descv;

	if ((error = copyin(args, &ua, sizeof(ua))) != 0)
		return (error);

#ifdef LDT_DEBUG
	printf("amd64_set_ldt: start=%d num=%d descs=%p\n", ua.start,
	    ua.num, ua.desc);
#endif

	/*
	 * Bound start and num individually as well as their sum;
	 * without the "ua.num > 8192" test the sum can overflow int
	 * and slip past the range check.
	 */
	if (ua.start < 0 || ua.num < 0 || ua.start > 8192 || ua.num > 8192 ||
	    (ua.start + ua.num) > 8192)
		return (EINVAL);

	/*
	 * Copy the descriptors in exactly once and validate that copy.
	 * The previous code re-copied each descriptor from user space
	 * when installing it, which let a racing thread substitute an
	 * unvalidated descriptor after the checks (TOCTOU).
	 */
	descv = malloc(sizeof(*descv) * ua.num, M_TEMP, M_NOWAIT);
	if (descv == NULL)
		return (ENOMEM);
	if ((error = copyin(ua.desc, descv, sizeof(*descv) * ua.num)) != 0)
		goto out;

	/* Check descriptors for access violations. */
	for (i = 0; i < ua.num; i++) {
		union descriptor *desc = &descv[i];

		switch (desc->sd.sd_type) {
		case SDT_SYSNULL:
			desc->sd.sd_p = 0;
			break;
		case SDT_SYS286CGT:
		case SDT_SYS386CGT:
			/*
			 * Only allow call gates targeting a segment
			 * in the LDT or a user segment in the fixed
			 * part of the gdt.  Segments in the LDT are
			 * constrained (below) to be user segments.
			 */
			if (desc->gd.gd_p != 0 &&
			    !ISLDT(desc->gd.gd_selector) &&
			    ((IDXSEL(desc->gd.gd_selector) >= NGDT) ||
			     (gdt[IDXSEL(desc->gd.gd_selector)].sd.sd_dpl !=
			     SEL_UPL))) {
				error = EACCES;
				goto out;
			}
			break;
		case SDT_MEMEC:
		case SDT_MEMEAC:
		case SDT_MEMERC:
		case SDT_MEMERAC:
			/* Must be "present" if executable and conforming. */
			if (desc->sd.sd_p == 0) {
				error = EACCES;
				goto out;
			}
			break;
		case SDT_MEMRO:
		case SDT_MEMROA:
		case SDT_MEMRW:
		case SDT_MEMRWA:
		case SDT_MEMROD:
		case SDT_MEMRODA:
		case SDT_MEMRWD:
		case SDT_MEMRWDA:
		case SDT_MEME:
		case SDT_MEMEA:
		case SDT_MEMER:
		case SDT_MEMERA:
			break;
		default:
			/* Only care if it's present. */
			if (desc->sd.sd_p != 0) {
				error = EACCES;
				goto out;
			}
			break;
		}

		if (desc->sd.sd_p != 0) {
			/* Only user (ring-3) descriptors may be present. */
			if (desc->sd.sd_dpl != SEL_UPL) {
				error = EACCES;
				goto out;
			}
		}
	}

	/*
	 * XXX LOCKING
	 */

	/* allocate user ldt */
	if (pmap->pm_ldt == 0 || (ua.start + ua.num) > pmap->pm_ldt_len) {
		size_t old_len, new_len;
		union descriptor *old_ldt, *new_ldt;

		if (pmap->pm_flags & PMF_USER_LDT) {
			old_len = pmap->pm_ldt_len * sizeof(union descriptor);
			old_ldt = pmap->pm_ldt;
		} else {
			/* Still on the shared default LDT. */
			old_len = NLDT * sizeof(union descriptor);
			old_ldt = ldt;
			pmap->pm_ldt_len = 512;
		}
		while ((ua.start + ua.num) > pmap->pm_ldt_len)
			pmap->pm_ldt_len *= 2;
		new_len = pmap->pm_ldt_len * sizeof(union descriptor);
		new_ldt = (union descriptor *)uvm_km_alloc(kernel_map,
		    new_len);
		if (new_ldt == NULL) {
			/*
			 * Allocation failed; the old code went on to
			 * memcpy() through the NULL pointer.  pm_ldt is
			 * untouched, so a later retry recomputes the
			 * length from scratch.
			 */
			error = ENOMEM;
			goto out;
		}
		memcpy(new_ldt, old_ldt, old_len);
		memset((caddr_t)new_ldt + old_len, 0, new_len - old_len);
		pmap->pm_ldt = new_ldt;

		/*
		 * Use the pmap flag PMF_USER_LDT here.  The old code
		 * tested and set PCB_USER_LDT — a pcb flag in the wrong
		 * namespace — so ldt_free() was never called and the
		 * "have a user LDT already" branch above never saw the
		 * bit that was set here.
		 */
		if (pmap->pm_flags & PMF_USER_LDT)
			ldt_free(pmap);
		else
			pmap->pm_flags |= PMF_USER_LDT;
		ldt_alloc(pmap, new_ldt, new_len);
		pcb->pcb_ldt_sel = pmap->pm_ldt_sel;
		if (pcb == curpcb)
			lldt(pcb->pcb_ldt_sel);

		/*
		 * XXX Need to notify other processors which may be
		 * XXX currently using this pmap that they need to
		 * XXX re-load the LDT.
		 */

		if (old_ldt != ldt)
			uvm_km_free(kernel_map, (vaddr_t)old_ldt, old_len);
#ifdef LDT_DEBUG
		printf("amd64_set_ldt(%d): new_ldt=%p\n", p->p_pid, new_ldt);
#endif
	}

	if (pcb == curpcb)
		savectx(curpcb);

	/* Now actually replace the descriptors (from the vetted copy). */
	for (i = 0, n = ua.start; i < ua.num; i++, n++)
		pmap->pm_ldt[n] = descv[i];

	*retval = ua.start;
	error = 0;
out:
	free(descv, M_TEMP);
	return (error);
}
/*
 * i386_set_ldt: install user-supplied LDT descriptors for process p.
 *
 * "args" points to a struct i386_set_ldt_args in user space giving the
 * first slot (start), the count (num) and the user array of descriptors
 * (desc).  The descriptors are copied in once, validated on that kernel
 * copy (only ring-3, non-system descriptors may be marked present), the
 * per-pmap user LDT is created or grown as needed under pm_lock, and
 * the slots are overwritten.  On success *retval is the first slot
 * written.
 *
 * Returns 0, or ENOSYS (user LDTs disabled), EINVAL (bad range),
 * ENOMEM, EACCES (forbidden descriptor), or a copyin error.
 */
int i386_set_ldt(struct proc *p, void *args, register_t *retval)
{
	int error, i, n;
	struct pcb *pcb = &p->p_addr->u_pcb;
	pmap_t pmap = p->p_vmspace->vm_map.pmap;
	struct i386_set_ldt_args ua;
	union descriptor *descv;
	size_t old_len, new_len, ldt_len;
	union descriptor *old_ldt, *new_ldt;

	/* User LDT support is administratively switched off. */
	if (user_ldt_enable == 0)
		return (ENOSYS);

	if ((error = copyin(args, &ua, sizeof(ua))) != 0)
		return (error);

	/*
	 * Bound start and num individually as well as their sum, so the
	 * addition cannot overflow int and bypass the range check.
	 */
	if (ua.start < 0 || ua.num < 0 || ua.start > 8192 || ua.num > 8192 ||
	    ua.start + ua.num > 8192)
		return (EINVAL);

	/*
	 * Snapshot the descriptors into a kernel buffer; all validation
	 * and the final install below work on this single copy, so user
	 * space cannot swap a descriptor between check and use.
	 */
	descv = malloc(sizeof (*descv) * ua.num, M_TEMP, M_NOWAIT);
	if (descv == NULL)
		return (ENOMEM);

	if ((error = copyin(ua.desc, descv, sizeof (*descv) * ua.num)) != 0)
		goto out;

	/* Check descriptors for access violations. */
	for (i = 0; i < ua.num; i++) {
		union descriptor *desc = &descv[i];

		switch (desc->sd.sd_type) {
		case SDT_SYSNULL:
			/* Null descriptors are forced non-present. */
			desc->sd.sd_p = 0;
			break;
		case SDT_SYS286CGT:
		case SDT_SYS386CGT:
			/*
			 * Only allow call gates targeting a segment
			 * in the LDT or a user segment in the fixed
			 * part of the gdt. Segments in the LDT are
			 * constrained (below) to be user segments.
			 */
			if (desc->gd.gd_p != 0 &&
			    !ISLDT(desc->gd.gd_selector) &&
			    ((IDXSEL(desc->gd.gd_selector) >= NGDT) ||
			     (gdt[IDXSEL(desc->gd.gd_selector)].sd.sd_dpl !=
			     SEL_UPL))) {
				error = EACCES;
				goto out;
			}
			break;
		case SDT_MEMEC:
		case SDT_MEMEAC:
		case SDT_MEMERC:
		case SDT_MEMERAC:
			/* Must be "present" if executable and conforming. */
			if (desc->sd.sd_p == 0) {
				error = EACCES;
				goto out;
			}
			break;
		case SDT_MEMRO:
		case SDT_MEMROA:
		case SDT_MEMRW:
		case SDT_MEMRWA:
		case SDT_MEMROD:
		case SDT_MEMRODA:
		case SDT_MEMRWD:
		case SDT_MEMRWDA:
		case SDT_MEME:
		case SDT_MEMEA:
		case SDT_MEMER:
		case SDT_MEMERA:
			/* Ordinary memory segment types: allowed as-is. */
			break;
		default:
			/*
			 * Make sure that unknown descriptor types are
			 * not marked present.
			 */
			if (desc->sd.sd_p != 0) {
				error = EACCES;
				goto out;
			}
			break;
		}

		if (desc->sd.sd_p != 0) {
			/* Only user (ring-3) descriptors may be present. */
			if (desc->sd.sd_dpl != SEL_UPL) {
				error = EACCES;
				goto out;
			}
		}
	}

	/* allocate user ldt */
	simple_lock(&pmap->pm_lock);
	if (pmap->pm_ldt == 0 || (ua.start + ua.num) > pmap->pm_ldt_len) {
		if (pmap->pm_flags & PMF_USER_LDT)
			ldt_len = pmap->pm_ldt_len;
		else
			ldt_len = 512;
		/* Double the length until the requested range fits. */
		while ((ua.start + ua.num) > ldt_len)
			ldt_len *= 2;
		new_len = ldt_len * sizeof(union descriptor);

		/*
		 * Drop pm_lock across the allocation (km_alloc may
		 * block) and revalidate our assumptions afterwards.
		 */
		simple_unlock(&pmap->pm_lock);
		new_ldt = km_alloc(round_page(new_len), &kv_any,
		    &kp_dirty, &kd_nowait);
		if (new_ldt == NULL) {
			error = ENOMEM;
			goto out;
		}
		simple_lock(&pmap->pm_lock);

		if (pmap->pm_ldt != NULL && ldt_len <= pmap->pm_ldt_len) {
			/*
			 * Another thread (re)allocated the LDT to
			 * sufficient size while we were blocked in
			 * km_alloc. Oh well. The new entries
			 * will quite probably not be right, but
			 * hey.. not our problem if user applications
			 * have race conditions like that.
			 */
			km_free(new_ldt, round_page(new_len), &kv_any,
			    &kp_dirty);
			goto copy;
		}

		old_ldt = pmap->pm_ldt;

		if (old_ldt != NULL) {
			old_len = pmap->pm_ldt_len * sizeof(union descriptor);
		} else {
			/* First user LDT: seed from the shared default. */
			old_len = NLDT * sizeof(union descriptor);
			old_ldt = ldt;
		}

		/* Carry old entries over; zero-fill the new tail. */
		memcpy(new_ldt, old_ldt, old_len);
		memset((caddr_t)new_ldt + old_len, 0, new_len - old_len);

		/* Never free the shared default LDT. */
		if (old_ldt != ldt)
			km_free(old_ldt, round_page(old_len), &kv_any,
			    &kp_dirty);

		pmap->pm_ldt = new_ldt;
		pmap->pm_ldt_len = ldt_len;

		if (pmap->pm_flags & PMF_USER_LDT)
			ldt_free(pmap);
		else
			pmap->pm_flags |= PMF_USER_LDT;
		ldt_alloc(pmap, new_ldt, new_len);
		pcb->pcb_ldt_sel = pmap->pm_ldt_sel;
		/* Reload the LDT register only if p is running right now. */
		if (pcb == curpcb)
			lldt(pcb->pcb_ldt_sel);

	}
copy:
	/* Now actually replace the descriptors. */
	for (i = 0, n = ua.start; i < ua.num; i++, n++)
		pmap->pm_ldt[n] = descv[i];
	simple_unlock(&pmap->pm_lock);

	*retval = ua.start;
out:
	free(descv, M_TEMP);
	return (error);
}