// Prepare camera parameters: optionally apply the requested resolution,
// then cache the effective width/height/fps from the stream's video mode.
void Oni2Grabber::initCamera_() 
{
	auto video_mode = streams_[0]->getVideoMode();

	// (1) Set parameters — apply the caller-requested resolution, if any.
	if(desired_width_ && desired_height_) 
	{
		// BUG FIX: was hard-coded to 640x480, silently ignoring the
		// desired_width_/desired_height_ the caller configured.
		video_mode.setResolution(desired_width_, desired_height_);
		streams_[0]->setVideoMode(video_mode);

		// Re-query the stream so we report what the driver actually
		// accepted, not what we merely asked for.
		video_mode = streams_[0]->getVideoMode();
	}
	
	// (2) Get (effective) parameters
	width_ = video_mode.getResolutionX();
	height_ = video_mode.getResolutionY();
	fps_ = video_mode.getFps();
	size_ = width_ * height_;
}
// ----- Example #2 -----
//--------------------------------------------------------------
// Copies the current soft-body node positions/normals into aMesh as a
// triangle list: each quad of the (resX-1) x (resY-1) grid becomes two
// triangles (p1,p2,p3) and (p3,p4,p1).
void ofxBulletPatch::updateMesh( ofMesh& aMesh ) {

    int totalFaces = getNumFaces();
    auto& tverts = aMesh.getVertices();
    
    if( _cachedMesh.getMode() == OF_PRIMITIVE_TRIANGLES ) {
        
        // Hoist the grid dimensions out of the loops.
        const int resX = getResolutionX();
        const int resY = getResolutionY();
        
        // 2 triangles -> 6 vertices per quad; totalFaces * 3 is the same count.
        if( tverts.size() != totalFaces * 3 ) {
            tverts.resize( (resX-1) * (resY-1) * 6 );
        }
        
        auto& tnormals = aMesh.getNormals();
        if( tnormals.size() != tverts.size() ) {
            tnormals.resize( tverts.size() );
        }
        
        // Reads position (m_x) and normal (m_n) of soft-body node ni.
        auto copyNode = [this]( int ni, ofVec3f& p, ofVec3f& n ) {
            const auto& node = _softBody->m_nodes[ni];
            p.set( node.m_x.x(), node.m_x.y(), node.m_x.z() );
            n.set( node.m_n.x(), node.m_n.y(), node.m_n.z() );
        };
        
        ofVec3f p1, p2, p3, p4;
        ofVec3f n1, n2, n3, n4;
        
        int ti = 0;
        for( int iy = 0; iy < resY-1; iy++ ) {
            for( int ix = 0; ix < resX-1; ix++ ) {
                
                // Four corners of the quad (row-major node indexing).
                copyNode( iy     * resX + ix,   p1, n1 );
                copyNode( (iy+1) * resX + ix,   p2, n2 );
                copyNode( (iy+1) * resX + ix+1, p3, n3 );
                copyNode( iy     * resX + ix+1, p4, n4 );
                
                // Triangle 1: p1, p2, p3.
                tverts[ti] = p1; tnormals[ti] = n1; ti++;
                tverts[ti] = p2; tnormals[ti] = n2; ti++;
                tverts[ti] = p3; tnormals[ti] = n3; ti++;
                
                // Triangle 2: p3, p4, p1.
                tverts[ti] = p3; tnormals[ti] = n3; ti++;
                tverts[ti] = p4; tnormals[ti] = n4; ti++;
                tverts[ti] = p1; tnormals[ti] = n1; ti++;
            }
        }
    }
    
    _lastMeshUpdateFrame = ofGetFrameNum();
}
void depth_sample()
{
    try {
        openni::OpenNI::initialize();

        openni::Device device;
        auto ret = device.open( openni::ANY_DEVICE );
        if ( ret != openni::STATUS_OK ) {
            throw std::runtime_error( "" );
        }

        openni::VideoStream depthStream;
        depthStream.create( device, openni::SensorType::SENSOR_DEPTH );
        depthStream.start();

        std::vector<openni::VideoStream*> streams;
        streams.push_back( &depthStream );

        cv::Mat depthImage;

        while ( 1 ) {
            int changedIndex;
            openni::OpenNI::waitForAnyStream( &streams[0], streams.size(), &changedIndex );
            if ( changedIndex == 0 ) {
                openni::VideoFrameRef depthFrame;
                depthStream.readFrame( &depthFrame );
                if ( depthFrame.isValid() ) {
                    depthImage = cv::Mat( depthStream.getVideoMode().getResolutionY(),
                                          depthStream.getVideoMode().getResolutionX(),
                                          CV_16U, (char*)depthFrame.getData() );

                    // 0-10000mmまでのデータを0-255にする
                    depthImage.convertTo( depthImage, CV_8U, 255.0 / 10000 );

                    // 中心点の距離を表示する
                    auto videoMode = depthStream.getVideoMode();

                    int centerX = videoMode.getResolutionX() / 2;
                    int centerY = videoMode.getResolutionY() / 2;
                    int centerIndex = (centerY * videoMode.getResolutionX()) + centerX;

                    short* depth = (short*)depthFrame.getData();

                    std::stringstream ss;
                    ss << "Center Point :" << depth[centerIndex];
                    cv::putText( depthImage, ss.str(), cv::Point( 0, 50 ),
                        cv::FONT_HERSHEY_SIMPLEX, 1.0, cv::Scalar( 255, 0, 0 ), 1 );

                    cv::imshow( "Depth Camera", depthImage );
                }
            }

            int key = cv::waitKey( 10 );
            if ( key == 'q' ) {
                break;
            }
        }
    }
    catch ( std::exception& ) {
        std::cout << openni::OpenNI::getExtendedError() << std::endl;
    }
}