// Deallocate user pages to bring the process size from oldsz to // newsz. oldsz and newsz need not be page-aligned, nor does newsz // need to be less than oldsz. oldsz can be larger than the actual // process size. Returns the new process size. int deallocuvm(pde_t *pgdir, uint oldsz, uint newsz) { pte_t *pte; uint a, pa; if(newsz >= oldsz) return oldsz; a = PGROUNDUP(newsz); for(; a < oldsz; a += PGSIZE){ pte = walkpgdir(pgdir, (char*)a, 0); if(!pte) a += (NPTENTRIES - 1) * PGSIZE; else if((*pte & PTE_P) != 0){ pa = PTE_ADDR(*pte); if(pa == 0) panic("kfree"); char *v = p2v(pa); kfree(v); *pte = 0; } } return newsz; }
// Append one decoded frame (its Y, U and V planes, in that order) to
// the decoder's output stream; aborts the process on any write error.
static void save_decoded_frame(decoder_context *decoder, frame_data *frame)
{
    FILE *out = decoder->opaque;
    long frame_offset = ftell(out);

    // Plane buffers live at physical addresses; p2v() maps them for the CPU.
    fwrite(p2v(frame->Y_paddr), 1, frame_luma_size(decoder), out);
    fwrite(p2v(frame->U_paddr), 1, frame_chroma_size(decoder), out);
    fwrite(p2v(frame->V_paddr), 1, frame_chroma_size(decoder), out);

    if (ferror(out)) {
        perror("Error writing to output file");
        abort();
    }

    printf("Saved frame %d file offset 0x%lX\n",
           decoder->frames_decoded - 1, frame_offset);
}
//PAGEBREAK: 36 // Print a process listing to console. For debugging. // Runs when user types ^P on console. // No lock to avoid wedging a stuck machine further. void procdump(void) { static char *states[] = { [UNUSED] "unused", [EMBRYO] "embryo", [SLEEPING] "sleep ", [RUNNABLE] "runble", [RUNNING] "run ", [ZOMBIE] "zombie" }; int i; struct proc *p; pde_t *pde; pte_t *pgtab, *pte; uint *va; char *state; uint pc[10]; for(p = ptable.proc; p < &ptable.proc[NPROC]; p++){ if(p->state == UNUSED) continue; if(p->state >= 0 && p->state < NELEM(states) && states[p->state]) state = states[p->state]; else state = "???"; int j, k; cprintf("%d %s %s", p->pid, state, p->name); cprintf("Page tables: \n"); cprintf(" Memory location of page directory = %p\n", p->pgdir); for(j = 0 ; j < 1024 ; j++){ pde = &(p->pgdir[j]); if((uint)*pde & PTE_P){//The page directory entry is present cprintf(" pdir PTE %d, %d\n", j, (uint)(((uint)*pde >> 12 ) & 0xfffff)); pgtab = (pte_t*)p2v(PTE_ADDR(*pde)); cprintf(" memory location of page table = %p\n", pgtab); for(k = 0 ; k < 1024 ; k++){ pte = &pgtab[k]; if((uint)*pte & PTE_P && (uint)*pte & PTE_U) { va = (uint*)p2v(PTE_ADDR(*pte)); cprintf(" ptbl PTE %d, %p, %p\n", k, (uint)(PTE_ADDR(*pte) >> 12), va); } } } }
// Intersect `ray` with the unit sphere centered at the origin.
// Returns the nearest hit point with .valid == true, or a point with
// .valid == false on a miss.  Side effect: on a hit, sets the member
// `normal` to the surface normal at the hit point (for a unit sphere
// at the origin the hit position itself is the normal).
Point Sphere::Hit(Ray ray)
{
  Point start = ray.start;          // NOTE(review): unused local
  Vector direction = ray.direction;
  Point retval;
  retval.valid = false;

  // Quadratic coefficients for |origin + t*dir|^2 = 1, using the
  // half-B form so the discriminant omits the factor of 4.
  double A,B,C;
  A = dot(ray.direction, ray.direction);
  B = dot (p2v(ray.start), ray.direction);
  C = dot(p2v(ray.start), p2v(ray.start)) - 1.0;  // unit-radius sphere
  double discrim = B*B - A*C;
  if (discrim < 0.0) {
    // No real roots: the ray misses the sphere entirely.
    // printf("ray misses\n");
    return retval;
  }

  double discRoot = sqrt(discrim);
  double t1 = (-B - discRoot)/A;    // nearer intersection parameter
  double t2 = (-B + discRoot)/A;    // farther intersection parameter

  // Both roots must lie in front of the origin (epsilon avoids
  // self-intersection).  Rays starting inside the sphere
  // (t1 <= 0 < t2) therefore report a miss here; the commented-out
  // block below once handled that "back hit" case.
  if (t1 > 0.000001 && t2 > 0.000001) {
    retval = ray.start + v2p(direction)*t1;
    retval.valid = true;
    normal = p2v(retval);
    return retval;
  }
  /*
  double t2 = (-B + discRoot)/A;
  if (t2 > 0.00001) {
    // printf("back hit\n");
    retval = ray.start + v2p(direction)*t2;
    retval.valid = true;
    normal = p2v(retval) ;
    return retval;
  }
  */
  return retval;
}
// Initialize free list of physical pages. void kinit(void) { char *p; initlock(&kmem.lock, "kmem"); p = (char*)PGROUNDUP((uint)newend); for(; p + PGSIZE <= (char*)p2v(PHYSTOP); p += PGSIZE) kfree(p); }
//PAGEBREAK! // Map user virtual address to kernel address. char* uva2ka(pde_t *pgdir, char *uva) { pte_t *pte; pte = walkpgdir(pgdir, uva, 0); if ((*pte & PTE_P) == 0) return 0; if ((*pte & PTE_U) == 0) return 0; return (char*) p2v(PTE_ADDR(*pte)); }
//Wille return 0 if error, 1 if success int check_page_fault(pde_t *pgdir, uint va) { pte_t *pte; uint pa; char *mem; //check if exists, and allowed by user if(va >= KERNBASE || va < 4096) { cprintf("Kernel or Null memory access\n"); return 0; } if((pte = walkpgdir(pgdir, (void *)va, 0)) == 0) { cprintf("memory access not in page dir\n"); return 0; } if( (!(*pte & PTE_P)) || (!(*pte & PTE_U)) ) { cprintf("memory access not for users\n"); return 0; } if( !(*pte & PTE_COW)) { cprintf("No cow bit, writing to read only mem\n"); return 0; } if( *pte & PTE_W) { cprintf("Writing other processes mem, error\n"); return 0; } pa = PTE_ADDR(*pte); //CHANGE: update reference counts acquire(&r_c.lock); if(r_c.ref_count[pa / 4096] == 1) { *pte = *pte | PTE_W; *pte = *pte & (~PTE_COW); release(&r_c.lock); //flush translation lookaside buffer flushtlb(); return 1; } else { r_c.ref_count[pa / 4096]--; release(&r_c.lock); if((mem = kalloc()) == 0) { return 0; } memmove(mem, (char*)p2v(pa), PGSIZE); *pte = v2p(mem) | PTE_FLAGS(*pte) | PTE_W; *pte = *pte & (~PTE_COW); acquire(&r_c.lock); r_c.ref_count[v2p(mem) / 4096] = 1; release(&r_c.lock); //flush translation lookaside buffer flushtlb(); return 1; } }
// Return the address of the PTE in page directory that corresponds to // virtual address va. If alloc!=0, create any required page table pages. static pte_t* walkpgdir (pgd_t *pgdbase, const void *va, int alloc) { pgd_t *pgd; pmd_t *pmdbase; pmd_t *pmd; pte_t *ptebase; pgd = &pgdbase[PGD_IDX((uint64)va)]; if(*pgd & (ENTRY_TABLE | ENTRY_VALID)) { pmdbase = (pmd_t*) p2v((*pgd) & PG_ADDR_MASK); } else { if (!alloc || (pmdbase = (pmd_t*) kpt_alloc()) == 0) { return 0; } memset(pmdbase, 0, PT_SZ); *pgd = v2p(pmdbase) | ENTRY_TABLE | ENTRY_VALID; } pmd = &pmdbase[PMD_IDX(va)]; if (*pmd & (ENTRY_TABLE | ENTRY_VALID)) { ptebase = (pte_t*) p2v((*pmd) & PG_ADDR_MASK); } else { if (!alloc || (ptebase = (pte_t*) kpt_alloc()) == 0) { return 0; } // Make sure all those PTE_P bits are zero. memset(ptebase, 0, PT_SZ); // The permissions here are overly generous, but they can // be further restricted by the permissions in the page table // entries, if necessary. *pmd = v2p(ptebase) | ENTRY_TABLE | ENTRY_VALID; } return &ptebase[PTE_IDX(va)]; }
// Write every swappable user page of p out to its per-process swap
// file, then free and unmap those pages.  Caller holds ptable.lock;
// it is dropped around each blocking file operation (open, write,
// close) and re-acquired afterwards.
void swapOut(struct proc* p) {
  //write to file
  char filename[9];
  struct file* f;
  uint i;
  pte_t* pte=0;

  getSwapFileName(p, filename);
  //cprintf("swapout %s %d\n", p->name, p->pid);
  //release(&inswapper_lk);
  //acquire(&inswapper_lk);

  // Opening a file can sleep; never hold ptable.lock across it.
  release(&ptable.lock);
  f = openKernelFile(filename, O_CREATE | O_WRONLY);
  acquire(&ptable.lock);
  //cprintf("sfff\n");
  //release(&inswapper_lk);
  if(f == 0)
    panic("swapout: file open error\n");
  //cprintf("swapout: before write\n");

  int freed = 0;
  for (i = 0; i < p->sz; i += PGSIZE) {
    if (!(pte = walkpgdir(p->pgdir, (void *) i, 0)))
      panic("swapout: pte should exist\n");
    //cprintf("walkpgdir: ok\n");
    if (!(*pte & PTE_P))
      panic("swapout: page not present\n");
    // Skip shared pages and pages not accessible to the user.
    if((*pte & PTE_SHR) || !(*pte & PTE_U))
      continue;
    char *addr=(char*)p2v(PTE_ADDR(*pte));
    //acquire(&inswapper_lk);
    // filewrite may sleep; drop ptable.lock for its duration.
    release(&ptable.lock);
    filewrite(f, addr, PGSIZE);
    acquire(&ptable.lock);
    // release(&inswapper_lk);
    //cprintf("(w=%s)", addr);
    kfree(addr);
    *pte = 0;   // unmap the page now that it is on disk
    freed++;
    //cprintf("swapout: wrote %d\n",i/PGSIZE);
  }
  //cprintf("swapout freed %d\n", freed);
  //kfree((char*) p->pgdir);
  //cprintf("swapout: after write\n");
  //freevm(p->pgdir);
  // acquire(&inswapper_lk);
  release(&ptable.lock);
  fileclose(f);
  acquire(&ptable.lock);
  // release(&inswapper_lk);
}
// Look for an MP structure in the len bytes at addr. static struct mp* mpsearch1(uint a, int len) { uchar *e, *p, *addr; addr = p2v(a); e = addr+len; for(p = addr; p < e; p += sizeof(struct mp)) if(memcmp(p, "_MP_", 4) == 0 && sum(p, sizeof(struct mp)) == 0) return (struct mp*)p; return 0; }
//PAGEBREAK! // Map user virtual address to kernel address. char* uva2ka(pde_t *pgdir, char *uva) { pte_t *pte; pte = walkpgdir(pgdir, uva, UVMPDXATTR, 0); if((uint)*pte == 0) return 0; if(((uint)*pte & PTX_AP(U_AP)) == 0) return 0; return (char*)p2v(PTE_ADDR(*pte)); }
// Free a page table and all the physical memory pages // in the user part. void freevm(pde_t *pgdir) { uint i; if (pgdir == 0) panic("freevm: no pgdir"); deallocuvm(pgdir, KERNBASE, 0); for (i = 0; i < NPDENTRIES; i++) { if (pgdir[i] & PTE_P) { char * v = p2v(PTE_ADDR(pgdir[i])); kfree(v); } } kfree((char*) pgdir); }
// Return a pointer to the PTE for virtual address va in pgdir,
// or 0 when the covering page table is not present.
// Never allocates.
static pte_t * walkpgdir(pde_t *pgdir, const void *va)
{
  pde_t *pde = &pgdir[PDX(va)];

  if(!(*pde & PTE_P))
    return 0;

  pte_t *pgtab = (pte_t*)p2v(PTE_ADDR(*pde));
  return &pgtab[PTX(va)];
}
// Set up kernel part of a page table. pde_t* setupkvm(void) { pde_t *pgdir; struct kmap *k; if ((pgdir = (pde_t*) kalloc()) == 0) return 0; memset(pgdir, 0, PGSIZE); if (p2v(PHYSTOP) > (void*) DEVSPACE) panic("PHYSTOP too high"); for (k = kmap; k < &kmap[NELEM(kmap)]; k++) if (mappages(pgdir, k->virt, k->phys_end - k->phys_start, (uint) k->phys_start, k->perm) < 0) return 0; return pgdir; }
// Free a page table and all the physical memory pages // in the user part. void freevm(pde_t *pgdir) { uint i; if(pgdir == 0) panic("freevm: no pgdir"); deallocuvm(pgdir, USERBOUND, 0); for(i = 0; i < NPDENTRIES; i++){ if((uint)pgdir[i] != 0){ char * v = p2v(PTE_ADDR(pgdir[i])); kfree(v); } } kfree((char*)pgdir); }
// user virtual address to kernel address void * uva2ka(Pml4e *pgmap, void *addr) { Pte *pte = walkpgmap(pgmap, addr, 0); if (pte == nil) return nil; if ((*pte & PTE_P) == 0) return nil; if ((*pte & PTE_U) == 0) return nil; uintptr pg = (uintptr)p2v(pte_addr(*pte)); uintptr a = pg | ((uintptr)addr & 0xFFF); return (void *)a; }
// Search for an MP configuration table. For now, // don't accept the default configurations (physaddr == 0). // Check for correct signature, calculate the checksum and, // if correct, check the version. // To do: check extended table checksum. static struct mpconf* mpconfig(struct mp **pmp) { struct mpconf *conf; struct mp *mp; if((mp = mpsearch()) == 0 || mp->physaddr == 0) return 0; conf = (struct mpconf*) p2v((uint) mp->physaddr); if(memcmp(conf, "PCMP", 4) != 0) return 0; if(conf->version != 1 && conf->version != 4) return 0; if(sum((uchar*)conf, conf->length) != 0) return 0; *pmp = mp; return conf; }
//PAGEBREAK! // Map user virtual address to kernel address. char* uva2ka (pgd_t *pgdir, char *uva) { pte_t *pte; pte = walkpgdir(pgdir, uva, 0); // make sure it exists if ((*pte & (ENTRY_PAGE | ENTRY_VALID)) == 0) { return 0; } // make sure it is a user page if (PTE_AP(*pte) != AP_RW_1_0) { return 0; } return (char*) p2v(PTE_ADDR(*pte)); }
// Load a program segment into pgdir. addr must be page-aligned // and the pages from addr to addr+sz must already be mapped. int loaduvm(pde_t *pgdir, char *addr, struct inode *ip, uint offset, uint sz) { uint i, pa, n; pte_t *pte; if ((uint) addr % PGSIZE != 0) panic("loaduvm: addr must be page aligned"); for (i = 0; i < sz; i += PGSIZE) { if ((pte = walkpgdir(pgdir, addr + i, 0)) == 0) panic("loaduvm: address should exist"); pa = PTE_ADDR(*pte); if (sz - i < PGSIZE) n = sz - i; else n = PGSIZE; if (readi(ip, p2v(pa), offset + i, n) != n) return -1; } return 0; }
// Evict one page from pgdir: the replacement policy picks a victim,
// the page is copied to the process's swap file, then unmapped and
// its frame freed.  Returns the victim's page-aligned virtual
// address, or -1 (as uint) when nothing can be swapped (init/shell
// process, or no candidate page).
uint swapOut(pde_t *pgdir) {
  uint va_page;
  pde_t *pte;
  char* psyc_page;

  va_page = nextPageSwap(pgdir);  // policy-chosen victim address
  if (!isNotInitShell(proc) || va_page == UNUSED_VA) {
    return -1;
  }
  if ((proc->swapData).nSwappedPages >= MAX_SWAP_PAGES) {
    panic("Trying to use more than 30 pages!!!");
  }

  // add page address to swap list in proc, copy the page to swap file and inc the num of swapped pages
  va_page = PGROUNDDOWN(va_page);
  int j = removeMemAddr(va_page);   // slot vacated in the in-memory list
  int i = addSwapAddr(va_page);     // slot taken in the swapped list

#ifdef NFU
  // Carry the page's aging counter over to its swap-list slot.
  (proc->swapData).nfu[i + MAX_PSYC_PAGES] = (proc->swapData).nfu[j];
  (proc->swapData).nfu[j] = 0;
#endif

#if defined(FIFO) || defined(SCFIFO)
  // Carry the page's arrival time over to its swap-list slot.
  (proc->swapData).creationTime[i + MAX_PSYC_PAGES] = (proc->swapData).creationTime[j];
  (proc->swapData).creationTime[j] = -1;
#endif

  writeToSwapFile(proc, (char *) va_page, i * PGSIZE, PGSIZE);

  // get address of va_page in page table pgdir, update flags and free it
  if ((pte = walkpgdir(pgdir, (char *) va_page, 0)) == 0)
    panic("swapOut: Page table not found!");

  *pte &= ~PTE_P; // change flag to not present
  *pte |= PTE_PG; // change flag to swapped out
  // The frame address is still recorded in the PTE; free it now.
  psyc_page = p2v(PTE_ADDR(*pte));
  kfree(psyc_page);

  lcr3(PTE_ADDR(v2p(pgdir))); // switch to new address space (also flushes the TLB)

  return va_page;
}
// Return the address of the PTE in page table pgdir // that corresponds to virtual address va. If alloc!=0, // create any required page table pages. static pte_t * walkpgdir(pde_t *pgdir, const void *va, int alloc) { pde_t *pde; pte_t *pgtab; pde = &pgdir[PDX(va)]; if (*pde & PTE_P) { pgtab = (pte_t*) p2v(PTE_ADDR(*pde)); } else { if (!alloc || (pgtab = (pte_t*) kalloc()) == 0) return 0; // Make sure all those PTE_P bits are zero. memset(pgtab, 0, PGSIZE); // The permissions here are overly generous, but they can // be further restricted by the permissions in the page table // entries, if necessary. *pde = v2p(pgtab) | PTE_P | PTE_W | PTE_U; } return &pgtab[PTX(va)]; }
// Given a parent process's page table, create a copy // of it for a child. pgd_t* copyuvm (pgd_t *pgdir, uint sz) { pgd_t *d; pte_t *pte; uint64 pa, i, ap; char *mem; // allocate a new first level page directory d = kpt_alloc(); if (d == NULL ) { return NULL ; } // copy the whole address space over (no COW) for (i = 0; i < sz; i += PTE_SZ) { if ((pte = walkpgdir(pgdir, (void *) i, 0)) == 0) { panic("copyuvm: pte should exist"); } if (!(*pte & (ENTRY_PAGE | ENTRY_VALID))) { panic("copyuvm: page not present"); } pa = PTE_ADDR (*pte); ap = PTE_AP (*pte); if ((mem = alloc_page()) == 0) { goto bad; } memmove(mem, (char*) p2v(pa), PTE_SZ); if (mappages(d, (void*) i, PTE_SZ, v2p(mem), ap) < 0) { goto bad; } } return d; bad: freevm(d); return 0; }
// Fetch the inner table referenced by table[offset], creating it when
// absent and alloc != 0.  Returns nil when the entry is absent and
// alloc is 0, or when allocation fails.
static ulong * pgmapget(ulong *table, int offset, int alloc)
{
    ulong *entry = &table[offset];
    ulong *inner;

    if ((*entry & PTE_P) != 0)
        return (ulong *)p2v(pte_addr(*entry));

    if (!alloc)
        return nil;
    inner = kalloc();
    if (inner == nil)
        return nil;
    memzero(inner, PGSIZE);
    // xv6 gives all the permissions, but where does
    // it restrict it?
    *entry = v2p(inner) | PTE_W | PTE_U | PTE_P;
    return inner;
}
void PointSolver2::projectModel (cv::Mat &output, vector<ModelLine> &model, PointSolver2::Projector &projector, Matrix4 &viewMatrix) { output = cv::Mat::zeros((int)projector.height, (int)projector.width, CV_8UC1); for (int lid=0; lid<model.size(); lid++) { ModelLine &line = model[lid]; ProjectedPoint P1, P2; P1.coord = projectPoint (line.p1, viewMatrix, projector, P1.inCam); P2.coord = projectPoint (line.p2, viewMatrix, projector, P2.inCam); Point2 P1scr = projector.NormalizedToScreen(P1.coord), P2scr = projector.NormalizedToScreen(P2.coord); cv::Point p1v (P1scr.x(), P1scr.y()); cv::Point p2v (P2scr.x(), P2scr.y()); cv::circle (output, p1v, 3, 255); cv::circle (output, p2v, 3, 255); cv::line (output, p1v, p2v, 255); } }
// Given a parent process's page map, build a complete copy of the
// first sz bytes of its user address space for a child.
// Returns the new map, or nil on allocation failure.
Pml4e * copyuvm(Pml4e *oldmap, usize sz)
{
    uintptr a;
    Pml4e *newmap;
    Pte *pte;
    uchar *oldmem, *newmem;
    uint flags;

    newmap = setupkvm();
    if (newmap == nil)
        return nil;
    for (a = 0; a < sz; a += PGSIZE) {
        pte = walkpgmap(oldmap, (void *)a, 0);
        if (pte == nil)
            panic("copyuvm - nil pte");
        // Fix: the original wrote `!*pte & PTE_P`, which parses as
        // `(!*pte) & PTE_P` and therefore never detected a cleared
        // present bit on a nonzero entry.
        if (!(*pte & PTE_P))
            panic("copyuvm - page not present");
        oldmem = p2v(pte_addr(*pte));
        flags = pte_flags(*pte);
        newmem = kalloc();
        if (newmem == nil)
            goto bad;
        memmove(newmem, oldmem, PGSIZE);
        if (mappages(newmap, (void *)a, PGSIZE, v2p(newmem), flags) < 0)
            goto bad;
    }
    return newmap;

bad:
    // Release everything copied so far.
    freeuvm(newmap);
    return nil;
}
// Start the non-boot (AP) processors. static void startothers(void) { extern uchar _binary_entryother_start[], _binary_entryother_size[]; uchar *code; struct cpu *c; char *stack; // Write entry code to unused memory at 0x7000. // The linker has placed the image of entryother.S in // _binary_entryother_start. code = p2v(0x7000); memmove(code, _binary_entryother_start, (uint)_binary_entryother_size); for(c = cpus; c < cpus+ncpu; c++){ if(c == cpus+cpunum()) // We've started already. continue; // Tell entryother.S what stack to use, where to enter, and what // pgdir to use. We cannot use kpgdir yet, because the AP processor // is running in low memory, so we use entrypgdir for the APs too. // kalloc can return addresses above 4Mbyte (the machine may have // much more physical memory than 4Mbyte), which aren't mapped by // entrypgdir, so we must allocate a stack using enter_alloc(); // this introduces the constraint that xv6 cannot use kalloc until // after these last enter_alloc invocations. stack = enter_alloc(); *(void**)(code-4) = stack + KSTACKSIZE; *(void**)(code-8) = mpenter; *(int**)(code-12) = (void *) v2p(entrypgdir); lapicstartap(c->id, v2p(code)); // wait for cpu to finish mpmain() while(c->started == 0) ; } }
// Swap a process's entire user image out to a per-pid "<pid>.swap"
// file, then release its user memory and mark it suspended.
void swapOut(struct proc* p){
  //create file name "<pid>.swap"
  char id_as_str[3]; // NOTE(review): room for 2 digits + NUL -- verify pids stay below 100
  itoa(p->pid,id_as_str);
  char path[strlen(id_as_str) + 5]; // NOTE(review): ".swap" is 5 chars; confirm this accounts for the terminating NUL
  // presumably a project helper: concatenate id_as_str and ".swap"
  // into path starting at offset 0 -- TODO confirm its contract
  strcat(path,0,id_as_str,".swap");
  p->swapped_file = kernel_open(path,O_CREATE | O_WRONLY);
  pte_t *pte;
  int i;
  uint pa;
  // Write every page of the user image, in address order, to the file.
  for(i = 0; i < p->sz; i += PGSIZE){
    if((pte = walkpgdir(p->pgdir, (void *) i, 0)) == 0)
      panic("copyuvm: pte should exist");
    if(!(*pte & PTE_P))
      panic("copyuvm: page not present");
    pa = PTE_ADDR(*pte);
    //cprintf("p->swapped_file %d\n",p->swapped_file);
    if(filewrite(p->swapped_file,p2v(pa),PGSIZE) < 0)
      panic("filewrite: error in swapOut");
  }
  // Close the swap file via the descriptor table if the process
  // holds it open there.
  int fd;
  for(fd = 0; fd < NOFILE; fd++){
    if(p->ofile[fd] && p->ofile[fd] == p->swapped_file){
      fileclose(p->ofile[fd]);
      p->ofile[fd] = 0;
      break;
    }
  }
  p->swapped_file = 0;
  p->swapped = 1;
  // Release the user pages now that their contents are on disk.
  deallocuvm(p->pgdir,p->sz,0);
  p->state = SLEEPING_SUSPENDED;
}
// Allocate page tables and physical memory to grow process from oldsz to // newsz, which need not be page aligned. Returns new size or 0 on error. int allocuvm(pde_t *pgdir, uint oldsz, uint newsz) { char *mem; uint a; if((newsz >= KERNBASE)||( newsz >= (uint)(p2v(PHYSTOP) - proc->ssm))) return 0; if(newsz < oldsz) return oldsz; a = PGROUNDUP(oldsz); for(; a < newsz; a += PGSIZE){ mem = kalloc(); if(mem == 0){ cprintf("allocuvm out of memory\n"); deallocuvm(pgdir, newsz, oldsz); return 0; } memset(mem, 0, PGSIZE); mappages(pgdir, (char*)a, PGSIZE, v2p(mem), PTE_W|PTE_U); } return newsz; }
// Worker thread: fill in the alpha channel of `lightmap` pixel by
// pixel.  For every light source, each pixel inside the light's
// radius gets a shadow ray cast toward the light center; any
// light-blocking tile (or, for tiles with per-pixel alpha, any fully
// opaque pixel) crossing that ray leaves the pixel shadowed.
// Progress and completion are published under `mtx`.
void CompileThreadPixel::thread() {
    EditableMap::Lights& lights = wmap->get_light_sources();
    Tileset *ts = wmap->get_tileset_ptr();
    size_t nlgt = lights.size();
    int mw = wmap->get_width();
    int mh = wmap->get_height();
    int tw = ts->get_tile_width();
    int th = ts->get_tile_height();
    int w = mw * tw;    // map width in pixels
    int h = mh * th;    // map height in pixels
    short **pmap = wmap->get_map();
    short **pdeco = wmap->get_decoration();
    Point pr;           // scratch intersection point (value unused)

    for (size_t i = 0; i < nlgt; i++) {
        {
            // Publish progress for the UI; mtx only guards the counter.
            ScopeMutex lock(mtx);
            finished_percent = 100 * (i + 1) / nlgt;
        }

        // Light center p2 and its square bounding box, clamped to the map.
        int r = lights[i]->radius;
        int lmaxsq = r * r;   // compared against squared distances
        int lx = lights[i]->x;
        int ly = lights[i]->y;
        Point p2(static_cast<float>(lx * tw + (tw / 2)), static_cast<float>(ly * th + (th / 2)));
        int lsx = static_cast<int>(p2.x) - r;
        int lsy = static_cast<int>(p2.y) - r;
        int lex = static_cast<int>(p2.x) + r;
        int ley = static_cast<int>(p2.y) + r;
        if (lsx < 0) lsx = 0;
        if (lsy < 0) lsy = 0;
        if (lex > w) lex = w;
        if (ley > h) ley = h;

        // Same bounding box in tile coordinates: candidate occluders.
        int txs = lsx / tw;
        int txe = lex / tw;
        int tys = lsy / th;
        int tye = ley / th;

        for (int y = lsy; y < ley; y++) {
            for (int x = lsx; x < lex; x++) {
                int dindex = pdeco[y / th][x / tw];
                if (dindex < 0) {
                    // No decoration tile here: fully lit (alpha 0).
                    lightmap[y][x * 4 + 3] = 0;
                } else {
                    // Cast a ray from pixel p1 to the light center p2.
                    bool contact = false;
                    Point p1(static_cast<float>(x), static_cast<float>(y));
                    float xd = p2.x - p1.x;
                    float yd = p2.y - p1.y;
                    float dist = xd * xd + yd * yd;
                    if (dist < lmaxsq) {
                        for (int tx = txs; tx < txe; tx++) {
                            for (int ty = tys; ty < tye; ty++) {
                                short index = pmap[ty][tx];
                                if (index >= 0) {
                                    if (ts->get_tile(index)->is_light_blocking()) {
                                        TileGraphic *tg = ts->get_tile(index)->get_tilegraphic();
                                        TileGraphicGL *tggl = static_cast<TileGraphicGL *>(tg);
                                        if (tggl->get_bytes_per_pixel(0) < 4) {
                                            // No alpha channel: treat the whole tile as
                                            // a solid box and test its four edges.
                                            Point p1l(static_cast<float>(tx * tw), static_cast<float>(ty * th));
                                            Point p2l(static_cast<float>(tx * tw), static_cast<float>((ty + 1) * th - 0.5f));
                                            Point p1r(static_cast<float>((tx + 1) * tw - 0.5f), static_cast<float>(ty * th));
                                            Point p2r(static_cast<float>((tx + 1) * tw - 0.5f), static_cast<float>((ty + 1) * th - 0.5f));
                                            Point p1t(static_cast<float>(tx * tw), static_cast<float>(ty * th));
                                            Point p2t(static_cast<float>((tx + 1) * tw - 0.5f), static_cast<float>(ty * th));
                                            Point p1b(static_cast<float>(tx * tw), static_cast<float>((ty + 1) * th - 0.5f));
                                            Point p2b(static_cast<float>((tx + 1) * tw - 0.5f), static_cast<float>((ty + 1) * th - 0.5f));
                                            if (intersection(p1, p2, p1l, p2l, pr) ||
                                                intersection(p1, p2, p1r, p2r, pr) ||
                                                intersection(p1, p2, p1t, p2t, pr) ||
                                                intersection(p1, p2, p1b, p2b, pr))
                                            {
                                                contact = true;
                                                break;
                                            }
                                        } else {
                                            // Per-pixel alpha: test the ray against a
                                            // small cross at each fully opaque pixel.
                                            unsigned char *p = tggl->get_picture_array(0);
                                            for (int py = 0; py < th; py++) {
                                                for (int px = 0; px < tw; px++) {
                                                    if (p[3] == 255) {
                                                        Point p1v(static_cast<float>(tx * tw + px) + 0.5f, static_cast<float>(ty * th + py) - 0.5f);
                                                        Point p2v(static_cast<float>(tx * tw + px) + 0.5f, static_cast<float>(ty * th + py) + 0.5f);
                                                        Point p1h(static_cast<float>(tx * tw + px) - 0.5f, static_cast<float>(ty * th + py) + 0.5f);
                                                        Point p2h(static_cast<float>(tx * tw + px) + 0.5f, static_cast<float>(ty * th + py) + 0.5f);
                                                        if (intersection(p1, p2, p1v, p2v, pr) || intersection(p1, p2, p1h, p2h, pr)) {
                                                            contact = true;
                                                            break;
                                                        }
                                                    }
                                                    p += 4; // advance to next RGBA pixel
                                                }
                                                if (contact) {
                                                    break;
                                                }
                                            }
                                            if (contact) {
                                                break;
                                            }
                                        }
                                    }
                                }
                            }
                            if (contact) {
                                break;
                            }
                        }
                    } else {
                        // Outside this light's radius: leave shadowed.
                        contact = true;
                    }
                    if (!contact) {
                        // Alpha grows with squared distance; 65025 = 255^2.
                        int v = static_cast<int>(sqrt(65025.0f * dist / lmaxsq));
                        if (v < lightmap[y][x * 4 + 3]) {
                            lightmap[y][x * 4 + 3] = v;
                        }
                    }
                }
            }
        }
    }

    ScopeMutex lock(mtx);
    finished = true;
}