Spectrum sampleDirection(DirectionSamplingRecord &dRec, PositionSamplingRecord &pRec,
        const Point2 &sample, const Point2 *extra) const {
    /* Thin-lens direction sampling: map the film sample to the plane of
       focus and aim a ray at it from the aperture position already stored
       in pRec.p. Returns unit weight; the density goes into dRec.pdf. */
    const Transform &trafo = m_worldTransform->eval(pRec.time);

    Point filmSample(sample.x, sample.y, 0.0f);
    if (extra) {
        /* The caller wants to condition on a specific pixel position */
        filmSample.x = (extra->x + sample.x) * m_invResolution.x;
        filmSample.y = (extra->y + sample.y) * m_invResolution.y;
    }
    pRec.uv = Point2(filmSample.x * m_resolution.x,
                     filmSample.y * m_resolution.y);

    /* Corresponding position on the near plane (local camera space) ... */
    Point nearP = m_sampleToCamera(filmSample);

    /* ... pushed out onto the plane of focus */
    Float focusScale = m_focusDistance / nearP.z;
    nearP.x *= focusScale;
    nearP.y *= focusScale;
    nearP.z = m_focusDistance;

    /* Aperture position expressed in local camera space */
    Point apertureP = trafo.inverse().transformAffine(pRec.p);

    /* Turn that into a normalized ray direction */
    Vector d = normalize(nearP - apertureP);
    dRec.d = trafo(d);
    dRec.measure = ESolidAngle;
    dRec.pdf = m_normalization / (d.z * d.z * d.z);

    return Spectrum(1.0f);
}
Spectrum sampleDirection(DirectionSamplingRecord &dRec, PositionSamplingRecord &pRec,
        const Point2 &sample, const Point2 *extra) const {
    /* Spherical sensor: warp the unit-square sample onto the sphere via a
       latitude-longitude parameterization and report the solid-angle pdf. */
    const Transform &trafo = m_worldTransform->eval(pRec.time);

    Point uvSample(sample.x, sample.y, 0.0f);
    if (extra) {
        /* The caller wants to condition on a specific pixel position */
        uvSample.x = (extra->x + sample.x) * m_invResolution.x;
        uvSample.y = (extra->y + sample.y) * m_invResolution.y;
    }
    pRec.uv = Point2(uvSample.x * m_resolution.x,
                     uvSample.y * m_resolution.y);

    /* x drives the azimuth (full turn), y the polar angle (half turn) */
    Float sinPhi, cosPhi, sinTheta, cosTheta;
    math::sincos(uvSample.x * 2 * M_PI, &sinPhi, &cosPhi);
    math::sincos(uvSample.y * M_PI, &sinTheta, &cosTheta);

    dRec.d = trafo(Vector(sinPhi * sinTheta, cosTheta, -cosPhi * sinTheta));
    dRec.measure = ESolidAngle;
    /* Jacobian of the lat-long map; sinTheta is clamped so the pdf stays
       finite at the poles */
    dRec.pdf = 1 / (2 * M_PI * M_PI * std::max(sinTheta, Epsilon));

    return Spectrum(1.0f);
}
void CActorEntity::update( float timeAlpha ) { bool alive = mGameEntity->isAlive(); if( alive ) { // fade out the outline float dt = CSystemTimer::getInstance().getDeltaTimeS(); mOutlineTTL -= dt; if( mOutlineTTL < 0.0f ) mOutlineTTL = 0.0f; SMatrix4x4& m = mWorldMat; SVector3 pos = samplePos( timeAlpha ); SVector3 dir = samplePos( timeAlpha + 0.1f ) - pos; if( dir.lengthSq() < 1.0e-3f ) dir = m.getAxisZ(); else dir.normalize(); if( mGameEntity->getType() == ENTITY_BLOCKER ) { double tt = CSystemTimer::getInstance().getTimeS(); D3DXMatrixRotationY( &m, tt * 0.2f ); m.getOrigin() = pos; m.getOrigin().y += sinf( tt * 0.6f ) * 0.2f; } else { m.getOrigin() = pos; m.getAxisZ() = dir; m.getAxisZ().y *= 0.2f; m.getAxisZ().normalize(); m.getAxisY().set( 0, 1, 0 ); m.getAxisX() = m.getAxisY().cross( m.getAxisZ() ); m.getAxisX().normalize(); m.getAxisY() = m.getAxisZ().cross( m.getAxisX() ); } } else { mOutlineTTL = 0.0f; } }
void SamplingIntegrator::renderBlock(const Scene *scene, const Sensor *sensor,
        Sampler *sampler, ImageBlock *block, const bool &stop,
        const std::vector< TPoint2<uint8_t> > &points) const {
    /* Renders one image block: for each pixel listed in `points`, draws
       sampler->getSampleCount() camera rays, evaluates Li() along each, and
       splats the result into `block`. `stop` is polled between pixels so a
       parallel render can be cancelled. */

    /* Shrink ray differentials to roughly one sample's footprint */
    Float diffScaleFactor = 1.0f / std::sqrt((Float) sampler->getSampleCount());

    bool needsApertureSample = sensor->needsApertureSample();
    bool needsTimeSample = sensor->needsTimeSample();

    RadianceQueryRecord rRec(scene, sampler);
    Point2 apertureSample(0.5f); /* center values used when the sensor */
    Float timeSample = 0.5f;     /* does not request these dimensions   */
    RayDifferential sensorRay;

    block->clear();

    uint32_t queryType = RadianceQueryRecord::ESensorRay;

    if (!sensor->getFilm()->hasAlpha()) /* Don't compute an alpha channel if we don't have to */
        queryType &= ~RadianceQueryRecord::EOpacity;

    for (size_t i = 0; i<points.size(); ++i) {
        /* Pixel position in global image coordinates */
        Point2i offset = Point2i(points[i]) + Vector2i(block->getOffset());
        if (stop)
            break;

        sampler->generate(offset);

        for (size_t j = 0; j<sampler->getSampleCount(); j++) {
            rRec.newQuery(queryType, sensor->getMedium());
            /* Jittered film position; the sample-draw order below must not
               change, since the sampler hands out dimensions sequentially */
            Point2 samplePos(Point2(offset) + Vector2(rRec.nextSample2D()));

            if (needsApertureSample)
                apertureSample = rRec.nextSample2D();
            if (needsTimeSample)
                timeSample = rRec.nextSample1D();

            Spectrum spec = sensor->sampleRayDifferential(
                sensorRay, samplePos, apertureSample, timeSample);

            sensorRay.scaleDifferential(diffScaleFactor);

            /* Sensor importance weight times the radiance estimate */
            spec *= Li(sensorRay, rRec);
            block->put(samplePos, spec, rRec.alpha);
            sampler->advance();
        }
    }
}
std::vector<Vector3R, Eigen::aligned_allocator<Vector3R>> Scene::getLightSourceSamples(int index, MaterialPtr &mPtr, Vector3R &normal, Real &area) { std::vector<Sample2D> samples = samplerPtr->sampleTriangle(1); std::vector<Vector3R, Eigen::aligned_allocator<Vector3R>> samplePos(samples.size()); for (int i = 0; i < samples.size(); i++) { samplePos[i] = (1 - samples[i].u - samples[i].v) * vertices[faces[index].v[0]].v + samples[i].u * vertices[faces[index].v[1]].v + samples[i].v * vertices[faces[index].v[2]].v; } normal = normals[faces[index].vn[0]].v; mPtr = faces[index].materialPtr; Vector3R e1 = vertices[faces[index].v[1]].v - vertices[faces[index].v[0]].v, e2 = vertices[faces[index].v[2]].v - vertices[faces[index].v[0]].v; area = 0.5 * e1.cross(e2).norm(); return samplePos; }
Spectrum samplePosition(PositionSamplingRecord &pRec, const Point2 &sample,
        const Point2 *extra) const {
    /* Samples a position on the underlying emitter shape, optionally
       conditioned on a specific pixel supplied via `extra`. */
    Point2 uv = sample;
    if (extra) {
        /* The caller wants to condition on a specific pixel position */
        uv.x = (extra->x + sample.x) * m_invResolution.x;
        uv.y = (extra->y + sample.y) * m_invResolution.y;
    }
    m_shape->samplePosition(pRec, uv);
    pRec.uv = Point2(uv.x * m_resolution.x, uv.y * m_resolution.y);
    /* Weight relative to the shape's position sampling density */
    return Spectrum(M_PI / (pRec.pdf * m_shape->getSurfaceArea()));
}
double BlobVoxelizationKernelValues::integrateBlobOverVoxel(const Vec3i& voxel) const
{
    // Numerically integrates the blob kernel over the unit voxel centered at
    // `voxel` with a SAMPLES^3 midpoint rule; returns the mean kernel value
    // (sum divided by the number of sub-samples, i.e. the integral over a
    // unit-volume voxel).
    double val = 0.0;
    for(int sz = 0; sz < SAMPLES; ++sz) {
        for(int sy = 0; sy < SAMPLES; ++sy) {
            for(int sx = 0; sx < SAMPLES; ++sx) {
                // Midpoint of sub-cell (sx, sy, sz); the voxel spans
                // [voxel - 0.5, voxel + 0.5] on each axis.
                // Fix: use double literals (0.5, not 0.5f) so the half-voxel
                // offset does not round through single precision while the
                // rest of the expression is computed in double.
                Vec3d samplePos(
                    (double)voxel.x - 0.5 + ((double)sx + 0.5) / (double)SAMPLES,
                    (double)voxel.y - 0.5 + ((double)sy + 0.5) / (double)SAMPLES,
                    (double)voxel.z - 0.5 + ((double)sz + 0.5) / (double)SAMPLES);
                // Kernel is evaluated radially: distance from the origin
                val += blobParameters.evaluate(samplePos.getLength());
            }
        }
    }
    return val / (double)(SAMPLES * SAMPLES * SAMPLES);
}
void HOGTrainer::train() { vector<Mat> fullPosLst; vector<Mat> fullNegLst; vector<Mat> negLst; vector<Mat> posLst; vector<Mat> gradientLst; vector<int> labels; loadImages(posDir, pos, fullPosLst); samplePos(fullPosLst, posLst, size); labels.assign(posLst.size(), +1); const unsigned int old = (unsigned int) labels.size(); loadImages(negDir, neg, fullNegLst); sampleNeg(fullNegLst, negLst, size); labels.insert(labels.end(), negLst.size(), -1); cout << old << " " << labels.size(); CV_Assert(old < labels.size()); computeHog(posLst, gradientLst, size); computeHog(negLst, gradientLst, size); trainSvm(gradientLst, labels); }
Spectrum samplePosition(PositionSamplingRecord &pRec, const Point2 &sample,
        const Point2 *extra) const {
    /* Samples a sensor position: maps the (optionally pixel-conditioned)
       film sample through m_sampleToCamera, drops it onto the z = 0 plane
       in local camera space, and transforms it to world space. */
    const Transform &trafo = m_worldTransform->eval(pRec.time);

    Point filmSample(sample.x, sample.y, 0.0f);
    if (extra) {
        /* The caller wants to condition on a specific pixel position */
        filmSample.x = (extra->x + sample.x) * m_invResolution.x;
        filmSample.y = (extra->y + sample.y) * m_invResolution.y;
    }
    pRec.uv = Point2(filmSample.x * m_resolution.x,
                     filmSample.y * m_resolution.y);

    Point nearP = m_sampleToCamera.transformAffine(filmSample);
    nearP.z = 0.0f;

    pRec.p = trafo.transformAffine(nearP);
    pRec.n = trafo(Vector(0.0f, 0.0f, 1.0f)); /* faces along local +z */
    pRec.pdf = m_invSurfaceArea;              /* uniform area density */
    pRec.measure = EArea;
    return Spectrum(1.0f);
}
void renderBlock(const Scene *scene, const Sensor *sensor, Sampler *sampler,
        ImageBlock *block, const bool &stop,
        const std::vector< TPoint2<uint8_t> > &points) const {
    // Renders one image block and additionally accumulates per-pixel
    // auxiliary buffers filled by Li(): LdA, per-lobe LdW, and a
    // segmentation bitmask (imageSeg). Once the block is finished, each
    // buffer is written to a PFM file named after the block offset.
    // NOTE(review): LdA / LdW / imageSeg / width / prefix are members
    // written without synchronization -- assumes blocks do not share
    // pixels; confirm against the scheduler.
    Float diffScaleFactor = 1.0f / std::sqrt((Float)sampler->getSampleCount());
    bool needsApertureSample = sensor->needsApertureSample();
    bool needsTimeSample = sensor->needsTimeSample();
    RadianceQueryRecord rRec(scene, sampler);
    Point2 apertureSample(0.5f); // center defaults when the sensor does not
    Float timeSample = 0.5f;     // request aperture / time dimensions
    RayDifferential sensorRay;
    block->clear();
    uint32_t queryType = RadianceQueryRecord::ESensorRay;
    if (!sensor->getFilm()->hasAlpha()) /* Don't compute an alpha channel if we don't have to */
        queryType &= ~RadianceQueryRecord::EOpacity;
    for (size_t i = 0; i < points.size(); ++i) {
        // Pixel position in global image coordinates
        Point2i offset = Point2i(points[i]) + Vector2i(block->getOffset());
        // Linear index into the image-wide auxiliary buffers
        int index = offset.x + offset.y * width;
        if (stop)
            break;
        sampler->generate(offset);
        // Counts of valid (finite, non-negative) samples, for averaging below
        Float cntLdA = 0.f;
        std::vector<Float> cntLdW(m_numLobes, 0.f);
        for (size_t j = 0; j < sampler->getSampleCount(); j++) {
            rRec.newQuery(queryType, sensor->getMedium());
            // Jittered film position; sample-draw order below is significant
            Point2 samplePos(Point2(offset) + Vector2(rRec.nextSample2D()));
            if (needsApertureSample)
                apertureSample = rRec.nextSample2D();
            if (needsTimeSample)
                timeSample = rRec.nextSample1D();
            Spectrum spec = sensor->sampleRayDifferential(
                sensorRay, samplePos, apertureSample, timeSample);
            sensorRay.scaleDifferential(diffScaleFactor);
            // Per-sample auxiliary outputs filled in by Li(); the TdA/TdW
            // values are currently unused (see the commented-out dump below)
            Spectrum oneTdA(0.f);
            Spectrum oneLdA(0.f);
            std::vector<Spectrum> oneTdW(m_numLobes, Spectrum(0.f));
            std::vector<Spectrum> oneLdW(m_numLobes, Spectrum(0.f));
            int albedoSegs = 0;
            spec *= Li(sensorRay, rRec, oneTdA, oneLdA, oneTdW, oneLdW, albedoSegs);
            block->put(samplePos, spec, rRec.alpha);
            // Reject NaN/Inf/negative channels before accumulating LdA
            bool goodSample = true;
            for (int c = 0; c < 3; c++) {
                if (!std::isfinite(oneLdA[c]) || oneLdA[c] < 0) {
                    goodSample = false;
                    break;
                }
            }
            if (goodSample) {
                LdA[index] += oneLdA;
                cntLdA += 1.f;
            }
            // Same rejection test per lobe for the LdW buffers
            for (int k = 0; k < m_numLobes; k++) {
                goodSample = true;
                for (int c = 0; c < 3; c++) {
                    if (!std::isfinite(oneLdW[k][c]) || oneLdW[k][c] < 0) {
                        goodSample = false;
                        break;
                    }
                }
                if (goodSample) {
                    LdW[k][index] += oneLdW[k];
                    cntLdW[k] += 1.f;
                }
            }
            // Union of segmentation flags seen by any sample at this pixel
            imageSeg[index] |= albedoSegs;
            sampler->advance();
        }
        // Turn accumulated sums into averages over the accepted samples
        if (cntLdA > 0.f) {
            LdA[index] /= cntLdA;
        } else {
            LdA[index] = Spectrum(0.f);
        }
        for (int k = 0; k < m_numLobes; k++) {
            if (cntLdW[k] > 0.f) {
                LdW[k][index] /= cntLdW[k];
            } else {
                LdW[k][index] = Spectrum(0.f);
            }
        }
    }
    // ---- Dump the auxiliary buffers for this block as PFM images ----
    // Scratch buffer: 3 channels per listed pixel, reused for every file
    Float *data = new Float[(int)points.size() * 3];
    std::string outfile = prefix + formatString("LdA_%03i_%03i.pfm",
        block->getOffset().x, block->getOffset().y);
    for (int i = 0; i < points.size(); i++) {
        Point2i p = Point2i(points[i]);
        int localIndex = p.x + p.y * block->getWidth();
        Point2i offset = p + Vector2i(block->getOffset());
        int globalIndex = offset.x + offset.y * width;
        Spectrum color(LdA[globalIndex]);
        for (int c = 0; c < 3; c++) {
            data[3 * localIndex + c] = color[c];
        }
    }
    savePfm(outfile.c_str(), data, block->getWidth(), block->getHeight());
    // One PFM per lobe for the LdW buffers
    for (int k = 0; k < m_numLobes; k++) {
        outfile = prefix + formatString("LdW_l%02i_%03i_%03i.pfm", k,
            block->getOffset().x, block->getOffset().y);
        for (int i = 0; i < points.size(); i++) {
            Point2i p = Point2i(points[i]);
            int localIndex = p.x + p.y * block->getWidth();
            Point2i offset = p + Vector2i(block->getOffset());
            int globalIndex = offset.x + offset.y * width;
            Spectrum color(LdW[k][globalIndex]);
            for (int c = 0; c < 3; c++) {
                data[3 * localIndex + c] = color[c];
            }
        }
        savePfm(outfile.c_str(), data, block->getWidth(), block->getHeight());
    }
    // Segmentation mask, replicated into the three color channels
    outfile = prefix + formatString("image_seg_%03i_%03i.pfm",
        block->getOffset().x, block->getOffset().y);
    for (int i = 0; i < points.size(); i++) {
        Point2i p = Point2i(points[i]);
        int localIndex = p.x + p.y * block->getWidth();
        Point2i offset = p + Vector2i(block->getOffset());
        int globalIndex = offset.x + offset.y * width;
        Spectrum color(imageSeg[globalIndex]);
        for (int c = 0; c < 3; c++) {
            data[3 * localIndex + c] = color[c];
        }
    }
    savePfm(outfile.c_str(), data, block->getWidth(), block->getHeight());
    /* outfile = formatString("TdA_%03i_%03i.pfm", block->getOffset().x, block->getOffset().y); for (int i = 0; i < points.size(); i++) { Point2i p = Point2i(points[i]); int localIndex = p.x + p.y * block->getWidth(); Point2i offset = p + Vector2i(block->getOffset()); int globalIndex = offset.x + offset.y * width; Spectrum color(TdA[globalIndex] / Float(spp)); for (int c = 0; c < 3; c++) { data[3 * localIndex + c] = color[c]; } } savePfm(outfile.c_str(), data, block->getWidth(), block->getHeight()); */
    delete[] data;
}