void *BarPlayerMacOSXThread(void *data){ struct audioPlayer *player = data; char extraHeaders[25]; void *ret = PLAYER_RET_OK; WaitressReturn_t wRet = WAITRESS_RET_ERR; /* init handles */ player->waith.data = (void *) player; /* extraHeaders will be initialized later */ player->waith.extraHeaders = extraHeaders; player->songPlayed = 0; switch (player->audioFormat) { case PIANO_AF_AACPLUS: { OSStatus err = AudioFileStreamOpen(player, StreamPropertyListenerProc, StreamPacketsProc, kAudioFileAAC_ADTSType, &player->audioFileStream); if (err) PRINTERROR ("Error opening stream!\n"); player->waith.callback = BarPlayerAACCb; } break; case PIANO_AF_MP3: case PIANO_AF_MP3_HI: { OSStatus err = AudioFileStreamOpen(player, StreamPropertyListenerProc, StreamPacketsProc, kAudioFileMP3Type, &player->audioFileStream); if (err) PRINTERROR ("Error opening stream!\n"); player->waith.callback = BarPlayerAACCb; } break; default: PRINTERROR ("Unsupported audio format!\n"); return PLAYER_RET_OK; break; } player->mode = PLAYER_INITIALIZED; /* This loop should work around song abortions by requesting the * missing part of the song */ do { snprintf (extraHeaders, sizeof (extraHeaders), "Range: bytes=%zu-\r\n", player->bytesReceived); wRet = WaitressFetchCall (&player->waith); } while (wRet == WAITRESS_RET_PARTIAL_FILE || wRet == WAITRESS_RET_TIMEOUT || wRet == WAITRESS_RET_READ_ERR); switch (player->audioFormat) { case PIANO_AF_AACPLUS: case PIANO_AF_MP3: case PIANO_AF_MP3_HI: AudioQueueStop(player->audioQueue, false); AudioFileStreamClose(player->streamID); AudioQueueDispose(player->audioQueue, false); break; default: /* this should never happen: thread is aborted above */ break; } WaitressFree (&player->waith); pthread_mutex_lock(&player->mutex); player->mode = PLAYER_FINISHED_PLAYBACK; pthread_cond_broadcast(&player->cond); pthread_mutex_unlock(&player->mutex); return ret; }
int main (int argc, char * const argv[]) { // allocate a struct for storing our state MyData* myData = (MyData*)calloc(1, sizeof(MyData)); // initialize a mutex and condition so that we can block on buffers in use. pthread_mutex_init(&myData->mutex, NULL); pthread_cond_init(&myData->cond, NULL); pthread_cond_init(&myData->done, NULL); // get connected int connection_socket = MyConnectSocket(); if (connection_socket < 0) return 1; printf("connected\n"); // allocate a buffer for reading data from a socket const size_t kRecvBufSize = 40000; char* buf = (char*)malloc(kRecvBufSize * sizeof(char)); // create an audio file stream parser OSStatus err = AudioFileStreamOpen(myData, MyPropertyListenerProc, MyPacketsProc, kAudioFileAAC_ADTSType, &myData->audioFileStream); if (err) { PRINTERROR("AudioFileStreamOpen"); return 1; } while (!myData->failed) { // read data from the socket printf("->recv\n"); ssize_t bytesRecvd = recv(connection_socket, buf, kRecvBufSize, 0); printf("bytesRecvd %d\n", bytesRecvd); if (bytesRecvd <= 0) break; // eof or failure // parse the data. this will call MyPropertyListenerProc and MyPacketsProc err = AudioFileStreamParseBytes(myData->audioFileStream, bytesRecvd, buf, 0); if (err) { PRINTERROR("AudioFileStreamParseBytes"); break; } } // enqueue last buffer MyEnqueueBuffer(myData); printf("flushing\n"); err = AudioQueueFlush(myData->audioQueue); if (err) { PRINTERROR("AudioQueueFlush"); return 1; } printf("stopping\n"); err = AudioQueueStop(myData->audioQueue, false); if (err) { PRINTERROR("AudioQueueStop"); return 1; } printf("waiting until finished playing..\n"); pthread_mutex_lock(&myData->mutex); pthread_cond_wait(&myData->done, &myData->mutex); pthread_mutex_unlock(&myData->mutex); printf("done\n"); // cleanup free(buf); err = AudioFileStreamClose(myData->audioFileStream); err = AudioQueueDispose(myData->audioQueue, false); close(connection_socket); free(myData); return 0; }
void MakeHSkeleton::updateBonesByJoints(const std::vector<vec3>& _V) { if(_V.size() != 19811) { PRINTERROR("no make human topology used"); return; } // set offset and calc lengths for(unsigned int i = 0; i < mBones.size(); ++i) { mBones[i].t = mJoints[mBones[i].j0]; mBones[i].length = (mJoints[mBones[i].j1] - mJoints[mBones[i].j0]).norm(); } // head vec3 up = (mJoints[1] - mJoints[0]).normalized(); vec3 front = (_V[4434] - _V[4870]).normalized(); vec3 side = up.cross(front).normalized(); front = up.cross(side).normalized(); mBones[0].R.col(0) = front; mBones[0].R.col(1) = up; mBones[0].R.col(2) = side; // left shoulder up = (mJoints[2] - mJoints[1]).normalized(); front = (_V[12099] - _V[11909]).normalized(); side = up.cross(front).normalized(); front = up.cross(side).normalized(); mBones[1].R.col(0) = front; mBones[1].R.col(1) = up; mBones[1].R.col(2) = side; // right shoulder up = (mJoints[3] - mJoints[1]).normalized(); front = (_V[2207] - _V[2018]).normalized(); side = up.cross(front).normalized(); front = up.cross(side).normalized(); mBones[2].R.col(0) = front; mBones[2].R.col(1) = up; mBones[2].R.col(2) = side; // breast up = (mJoints[4] - mJoints[1]).normalized(); front = (_V[15414] - _V[5226]).normalized(); side = up.cross(front).normalized(); front = up.cross(side).normalized(); mBones[3].R.col(0) = front; mBones[3].R.col(1) = up; mBones[3].R.col(2) = side; // left abdomen up = (mJoints[9] - mJoints[4]).normalized(); front = (_V[12666] - _V[10996]).normalized(); side = up.cross(front).normalized(); front = up.cross(side).normalized(); mBones[4].R.col(0) = front; mBones[4].R.col(1) = up; mBones[4].R.col(2) = side; // right abdomen up = (mJoints[10] - mJoints[4]).normalized(); front = (_V[2775] - _V[1103]).normalized(); side = up.cross(front).normalized(); front = up.cross(side).normalized(); mBones[5].R.col(0) = front; mBones[5].R.col(1) = up; mBones[5].R.col(2) = side; // left upper arm up = (mJoints[5] - mJoints[2]).normalized(); front = (_V[11550] - 
_V[15218]).normalized(); side = up.cross(front).normalized(); front = up.cross(side).normalized(); mBones[6].R.col(0) = front; mBones[6].R.col(1) = up; mBones[6].R.col(2) = side; // right upper arm up = (mJoints[6] - mJoints[3]).normalized(); front = (_V[1659] - _V[5323]).normalized(); side = up.cross(front).normalized(); front = up.cross(side).normalized(); mBones[7].R.col(0) = front; mBones[7].R.col(1) = up; mBones[7].R.col(2) = side; // left upper leg up = (mJoints[11] - mJoints[9]).normalized(); front = (_V[13361] - _V[10389]).normalized(); side = up.cross(front).normalized(); front = up.cross(side).normalized(); mBones[8].R.col(0) = front; mBones[8].R.col(1) = up; mBones[8].R.col(2) = side; // right upper leg up = (mJoints[12] - mJoints[10]).normalized(); front = (_V[3466] - _V[496]).normalized(); side = up.cross(front).normalized(); front = up.cross(side).normalized(); mBones[9].R.col(0) = front; mBones[9].R.col(1) = up; mBones[9].R.col(2) = side; // left lower arm up = (mJoints[7] - mJoints[5]).normalized(); front = (_V[9908] - _V[16966]).normalized(); side = up.cross(front).normalized(); front = up.cross(side).normalized(); mBones[10].R.col(0) = front; mBones[10].R.col(1) = up; mBones[10].R.col(2) = side; // right lower arm up = (mJoints[8] - mJoints[6]).normalized(); front = (_V[13] - _V[7073]).normalized(); side = up.cross(front).normalized(); front = up.cross(side).normalized(); mBones[11].R.col(0) = front; mBones[11].R.col(1) = up; mBones[11].R.col(2) = side; // left lower leg up = (mJoints[13] - mJoints[11]).normalized(); front = (_V[18182] - _V[11443]).normalized(); side = up.cross(front).normalized(); front = up.cross(side).normalized(); mBones[12].R.col(0) = front; mBones[12].R.col(1) = up; mBones[12].R.col(2) = side; // right lower leg up = (mJoints[14] - mJoints[12]).normalized(); front = (_V[8267] - _V[1551]).normalized(); side = up.cross(front).normalized(); front = up.cross(side).normalized(); mBones[13].R.col(0) = front; mBones[13].R.col(1) = 
up; mBones[13].R.col(2) = side; // left hand up = (mJoints[15] - mJoints[7]).normalized(); front = (_V[17358] - _V[16755]).normalized(); side = up.cross(front).normalized(); front = up.cross(side).normalized(); mBones[14].R.col(0) = front; mBones[14].R.col(1) = up; mBones[14].R.col(2) = side; // right hand up = (mJoints[16] - mJoints[8]).normalized(); front = (_V[7464] - _V[6862]).normalized(); side = up.cross(front).normalized(); front = up.cross(side).normalized(); mBones[15].R.col(0) = front; mBones[15].R.col(1) = up; mBones[15].R.col(2) = side; // left foot up = (mJoints[17] - mJoints[13]).normalized(); front = (_V[19263] - _V[10545]).normalized(); side = up.cross(front).normalized(); front = up.cross(side).normalized(); mBones[16].R.col(0) = front; mBones[16].R.col(1) = up; mBones[16].R.col(2) = side; // right foot up = (mJoints[18] - mJoints[14]).normalized(); front = (_V[9346] - _V[652]).normalized(); side = up.cross(front).normalized(); front = up.cross(side).normalized(); mBones[17].R.col(0) = front; mBones[17].R.col(1) = up; mBones[17].R.col(2) = side; }
bool loadParams(int argc, char ** argv, std::string& worldFilename, std::string& robotFilename, std::string& objectFilename, std::string& outputDirectory, bool& saveSeparate, Eigen::Vector3d& objPos, int& maxIterations) { saveSeparate = false; worldFilename.clear(); robotFilename.clear(); objectFilename.clear(); outputDirectory.clear(); boost::program_options::variables_map vm; try { vm = loadParams(argc, argv); } catch (std::exception const& e) { PRINTERROR("Exception caught: " << e.what()); return false; } catch (...) { PRINTERROR("Exception caught"); return false; } boost::program_options::options_description desc = getOptions(); // desc=getOptions(); if (vm.count("help")) { PRINTMSG(desc); return false; } if (vm.count("dir") < 1) { PRINTERROR("Must specify an output directory"); PRINTMSG(desc); return false; } if (vm.count("wld") && (vm.count("rob") || vm.count("obj"))) { PRINTERROR("Cannot specify a world and a robot and/or object at the same time."); PRINTMSG(desc); return false; } if (!vm.count("wld") && !vm.count("rob")) { PRINTERROR("Have to specify either a robot or a world."); PRINTMSG(desc); return false; } if (vm.count("rob") != vm.count("obj")) { PRINTERROR("If you specify a robot, you also have to specify an object, and vice versa."); PRINTMSG(desc); return false; } if (vm.count("rob") > 1) { PRINTERROR("You can only specify one robot at this stage."); PRINTMSG(desc); return false; } if (vm.count("obj") > 1) { PRINTERROR("You can only specify one object at this stage."); PRINTMSG(desc); return false; } if (vm.count("obj") != vm.count("rob")) { PRINTERROR("If you specify a robot, you should also specify an object."); PRINTMSG(desc); return false; } if (vm.count("wld")) { worldFilename = vm["wld"].as<std::string>(); PRINTMSG("World file is " << worldFilename); } if (vm.count("rob")) { robotFilename = vm["rob"].as<std::string>(); PRINTMSG("Robot file is " << robotFilename); } if (vm.count("obj")) { objectFilename = vm["obj"].as<std::string>(); 
PRINTMSG("Object file is " << objectFilename); } if (vm.count("dir")) { outputDirectory = vm["dir"].as<std::string>(); PRINTMSG("Output dir is " << outputDirectory); } if (vm.count("iter")) { maxIterations = vm["iter"].as<int>(); PRINTMSG("Number of iterations: " << maxIterations); if (maxIterations < 35000) { PRINTWARN("Planning is not working well with max iterations < 35000"); } } if (vm.count("obj-pos")) { std::vector<float> vals=vm["obj-pos"].as<std::vector<float> >(); if (vals.size()!=3) { PRINTERROR("Must specify 3 values for --obj-pos: x, y and z (specified "<<vals.size()<<")"); PRINTMSG(desc); } PRINTMSG("Using initial object pose "<<vals[0]<<", "<<vals[1]<<", "<<vals[2]); objPos=Eigen::Vector3d(vals[0],vals[1],vals[2]); } if (vm.count("save-separate")) { saveSeparate=true; } return true; }
int main(int argc, char **argv) { signal(SIGSEGV, handler); signal(SIGABRT, handler); PRINT_INIT_STD(); std::string worldFilename; std::string robotFilename; std::string objectFilename; std::string outputDirectory; bool saveSeparate; Eigen::Vector3d objPos; int maxPlanningSteps = 50000; if (!loadParams(argc, argv, worldFilename, robotFilename, objectFilename, outputDirectory, saveSeparate, objPos, maxPlanningSteps)) { PRINTERROR("Could not read arguments"); return 1; } PRINTMSG("Creating planner"); std::string name = "EigenGraspPlanner1"; // TODO make parameter SHARED_PTR<GraspIt::GraspItSceneManager> graspitMgr(new GraspIt::GraspItSceneManagerHeadless()); #ifdef USE_EIGENGRASP_NOQT SHARED_PTR<GraspIt::EigenGraspPlannerNoQt> p(new GraspIt::EigenGraspPlannerNoQt(name, graspitMgr)); #else SHARED_PTR<GraspIt::EigenGraspPlanner> p(new GraspIt::EigenGraspPlanner(name, graspitMgr)); #endif // TODO parameterize: // Names for robot and object if not loaded from a world file. // If loaded from a world file, will be overwritten. std::string useRobotName="Robot1"; std::string useObjectName="Object1"; if (!worldFilename.empty()) { PRINTMSG("Loading world"); graspitMgr->loadWorld(worldFilename); std::vector<std::string> robs = graspitMgr->getRobotNames(); std::vector<std::string> objs = graspitMgr->getObjectNames(true); if (robs.empty()) { PRINTERROR("No robots loaded"); return 1; } if (objs.empty()) { PRINTERROR("No graspable objects loaded"); return 1; } if (robs.size()!=1) { PRINTERROR("Exactly 1 robot should have been loaded"); return 1; } if (objs.size()!=1) { PRINTERROR("Exactly 1 graspable object should have been loaded"); return 1; } useRobotName=robs.front(); useObjectName=objs.front(); PRINTMSG("Using robot "<<useRobotName<<" and object "<<useObjectName); } else { // TODO add an option to set the transforms. // For now, they're put in the origin. For the planning, this should not really matter... 
GraspIt::EigenTransform robotTransform; GraspIt::EigenTransform objectTransform; robotTransform.setIdentity(); objectTransform.setIdentity(); objectTransform.translate(objPos); // objectTransform.translate(Eigen::Vector3d(100,0,0)); std::string robotName(useRobotName); std::string objectName(useObjectName); if ((graspitMgr->loadRobot(robotFilename, robotName, robotTransform) != 0) || (graspitMgr->loadObject(objectFilename, objectName, true, objectTransform))) { PRINTERROR("Could not load robot or object"); return 1; } } bool createDir = true; bool saveIV = true; bool forceWrite = createDir; // only enforce if creating dir is also allowed // in case one wants to view the initial world before planning, save it: graspitMgr->saveGraspItWorld(outputDirectory + "/startWorld.xml", createDir); graspitMgr->saveInventorWorld(outputDirectory + "/startWorld.iv", createDir); if (saveSeparate) { graspitMgr->saveRobotAsInventor(outputDirectory + "/robotStartpose.iv", useRobotName, createDir, forceWrite); graspitMgr->saveObjectAsInventor(outputDirectory + "/object.iv", useObjectName, createDir, forceWrite); } int repeatPlanning = 1; int keepMaxPlanningResults = 3; bool finishWithAutograsp = false; p->plan(maxPlanningSteps, repeatPlanning, keepMaxPlanningResults, finishWithAutograsp); PRINTMSG("Saving results as world files"); bool saveWorld = true; std::string resultsWorldDirectory = outputDirectory; std::string filenamePrefix = "world"; p->saveResultsAsWorldFiles(resultsWorldDirectory, filenamePrefix, saveWorld, saveIV, createDir, saveSeparate); std::vector<GraspIt::EigenGraspResult> allGrasps; p->getResults(allGrasps); PRINTMSG("Grasp results:"); std::vector<GraspIt::EigenGraspResult>::iterator it; for (it = allGrasps.begin(); it != allGrasps.end(); ++it) { PRINTMSG(*it); } PRINTMSG("Quitting program."); return 1; }
// Load the assets.
// Builds every GPU object the sample needs, in strict order: graphics and
// compute root signatures; graphics and compute PSOs (shaders compiled from
// VolumetricAnimation_shader.hlsl); the compute and graphics command lists;
// the volume buffer (CPU-generated, uploaded via an intermediate heap, with
// SRV + UAV views); the cube vertex and index buffers; the mapped constant
// buffer and its CBV; and finally a fence used to wait for the initial
// upload to finish on the GPU. Returns S_OK on success; the V/VRET macros
// handle/propagate failing HRESULTs (hr is used by those macros).
HRESULT VolumetricAnimation::LoadAssets()
{
    HRESULT hr;

    // Create a root signature consisting of a descriptor table with a CBV SRV and a sampler.
    {
        // One descriptor range per root parameter: CBV (b0), SRV (t0), UAV (u0).
        CD3DX12_DESCRIPTOR_RANGE ranges[3];
        CD3DX12_ROOT_PARAMETER rootParameters[3];

        ranges[0].Init( D3D12_DESCRIPTOR_RANGE_TYPE_CBV, 1, 0 );
        ranges[1].Init( D3D12_DESCRIPTOR_RANGE_TYPE_SRV, 1, 0 );
        ranges[2].Init( D3D12_DESCRIPTOR_RANGE_TYPE_UAV, 1, 0 );
        rootParameters[RootParameterCBV].InitAsDescriptorTable( 1, &ranges[0], D3D12_SHADER_VISIBILITY_ALL );
        rootParameters[RootParameterSRV].InitAsDescriptorTable( 1, &ranges[1], D3D12_SHADER_VISIBILITY_PIXEL );
        rootParameters[RootParameterUAV].InitAsDescriptorTable( 1, &ranges[2], D3D12_SHADER_VISIBILITY_ALL );

        // Static point-sampling sampler (s0), border-addressed, pixel shader only.
        D3D12_STATIC_SAMPLER_DESC sampler = {};
        sampler.Filter = D3D12_FILTER_MIN_MAG_MIP_POINT;
        sampler.AddressU = D3D12_TEXTURE_ADDRESS_MODE_BORDER;
        sampler.AddressV = D3D12_TEXTURE_ADDRESS_MODE_BORDER;
        sampler.AddressW = D3D12_TEXTURE_ADDRESS_MODE_BORDER;
        sampler.MipLODBias = 0;
        sampler.MaxAnisotropy = 0;
        sampler.ComparisonFunc = D3D12_COMPARISON_FUNC_NEVER;
        sampler.BorderColor = D3D12_STATIC_BORDER_COLOR_TRANSPARENT_BLACK;
        sampler.MinLOD = 0.0f;
        sampler.MaxLOD = D3D12_FLOAT32_MAX;
        sampler.ShaderRegister = 0;
        sampler.RegisterSpace = 0;
        sampler.ShaderVisibility = D3D12_SHADER_VISIBILITY_PIXEL;

        // Allow input layout and deny unnecessary access to certain pipeline stages.
        D3D12_ROOT_SIGNATURE_FLAGS rootSignatureFlags =
            D3D12_ROOT_SIGNATURE_FLAG_ALLOW_INPUT_ASSEMBLER_INPUT_LAYOUT |
            D3D12_ROOT_SIGNATURE_FLAG_DENY_HULL_SHADER_ROOT_ACCESS |
            D3D12_ROOT_SIGNATURE_FLAG_DENY_DOMAIN_SHADER_ROOT_ACCESS |
            D3D12_ROOT_SIGNATURE_FLAG_DENY_GEOMETRY_SHADER_ROOT_ACCESS;

        CD3DX12_ROOT_SIGNATURE_DESC rootSignatureDesc;
        rootSignatureDesc.Init( _countof(rootParameters), rootParameters, 1, &sampler, rootSignatureFlags );

        ComPtr<ID3DBlob> signature;
        ComPtr<ID3DBlob> error;
        V( D3D12SerializeRootSignature( &rootSignatureDesc, D3D_ROOT_SIGNATURE_VERSION_1, &signature, &error ) );
        // On serialization failure the error blob carries a human-readable message.
        if ( error ) PRINTERROR( reinterpret_cast< const char* >( error->GetBufferPointer() ) );
        VRET( m_device->CreateRootSignature( 0, signature->GetBufferPointer(), signature->GetBufferSize(),
                                             IID_PPV_ARGS( &m_graphicsRootSignature ) ) );
        DXDebugName( m_graphicsRootSignature );

        // Create compute signature. Must change visibility for the SRV.
        // (Reuses rootParameters; no sampler and no IA flags for compute.)
        rootParameters[RootParameterSRV].ShaderVisibility = D3D12_SHADER_VISIBILITY_ALL;
        CD3DX12_ROOT_SIGNATURE_DESC computeRootSignatureDesc( _countof( rootParameters ), rootParameters, 0, nullptr );
        VRET( D3D12SerializeRootSignature( &computeRootSignatureDesc, D3D_ROOT_SIGNATURE_VERSION_1, &signature, &error ) );
        VRET( m_device->CreateRootSignature( 0, signature->GetBufferPointer(), signature->GetBufferSize(),
                                             IID_PPV_ARGS( &m_computeRootSignature ) ) );
    }

    // Create the pipeline state, which includes compiling and loading shaders.
    {
        ComPtr<ID3DBlob> vertexShader;
        ComPtr<ID3DBlob> pixelShader;
        ComPtr<ID3DBlob> computeShader;

        UINT compileFlags = 0;

        // All three entry points live in the same HLSL file.
        VRET( CompileShaderFromFile( GetAssetFullPath( _T( "VolumetricAnimation_shader.hlsl" ) ).c_str(), nullptr,
                                     D3D_COMPILE_STANDARD_FILE_INCLUDE, "vsmain", "vs_5_0", compileFlags, 0, &vertexShader ) );
        VRET( CompileShaderFromFile( GetAssetFullPath( _T( "VolumetricAnimation_shader.hlsl" ) ).c_str(), nullptr,
                                     D3D_COMPILE_STANDARD_FILE_INCLUDE, "psmain", "ps_5_0", compileFlags, 0, &pixelShader ) );
        VRET( CompileShaderFromFile( GetAssetFullPath( _T( "VolumetricAnimation_shader.hlsl" ) ).c_str(), nullptr,
                                     D3D_COMPILE_STANDARD_FILE_INCLUDE, "csmain", "cs_5_0", compileFlags, 0, &computeShader ) );

        // Define the vertex input layout.
        D3D12_INPUT_ELEMENT_DESC inputElementDescs[] =
        {
            { "POSITION", 0, DXGI_FORMAT_R32G32B32_FLOAT, 0, D3D12_APPEND_ALIGNED_ELEMENT, D3D12_INPUT_CLASSIFICATION_PER_VERTEX_DATA, 0 }
        };

        // Standard depth test, no stencil.
        CD3DX12_DEPTH_STENCIL_DESC depthStencilDesc( D3D12_DEFAULT );
        depthStencilDesc.DepthEnable = true;
        depthStencilDesc.DepthWriteMask = D3D12_DEPTH_WRITE_MASK_ALL;
        depthStencilDesc.DepthFunc = D3D12_COMPARISON_FUNC_LESS_EQUAL;
        depthStencilDesc.StencilEnable = FALSE;

        // Describe and create the graphics pipeline state object (PSO).
        D3D12_GRAPHICS_PIPELINE_STATE_DESC psoDesc = {};
        psoDesc.InputLayout = { inputElementDescs, _countof( inputElementDescs ) };
        psoDesc.pRootSignature = m_graphicsRootSignature.Get();
        psoDesc.VS = { reinterpret_cast< UINT8* >( vertexShader->GetBufferPointer() ), vertexShader->GetBufferSize() };
        psoDesc.PS = { reinterpret_cast< UINT8* >( pixelShader->GetBufferPointer() ), pixelShader->GetBufferSize() };
        psoDesc.RasterizerState = CD3DX12_RASTERIZER_DESC( D3D12_DEFAULT );
        psoDesc.BlendState = CD3DX12_BLEND_DESC( D3D12_DEFAULT );
        psoDesc.DepthStencilState = depthStencilDesc;
        psoDesc.SampleMask = UINT_MAX;
        psoDesc.PrimitiveTopologyType = D3D12_PRIMITIVE_TOPOLOGY_TYPE_TRIANGLE;
        psoDesc.NumRenderTargets = 1;
        psoDesc.RTVFormats[0] = DXGI_FORMAT_R8G8B8A8_UNORM;
        psoDesc.DSVFormat = DXGI_FORMAT_D32_FLOAT;
        psoDesc.SampleDesc.Count = 1;
        VRET( m_device->CreateGraphicsPipelineState( &psoDesc, IID_PPV_ARGS( &m_pipelineState ) ) );
        DXDebugName( m_pipelineState );

        // Describe and create the compute pipeline state object (PSO).
        D3D12_COMPUTE_PIPELINE_STATE_DESC computePsoDesc = {};
        computePsoDesc.pRootSignature = m_computeRootSignature.Get();
        computePsoDesc.CS = { reinterpret_cast< UINT8* >( computeShader->GetBufferPointer() ), computeShader->GetBufferSize() };
        VRET( m_device->CreateComputePipelineState( &computePsoDesc, IID_PPV_ARGS( &m_computeState ) ) );
        DXDebugName( m_computeState );
    }

    // Create the compute command list (created open, closed immediately —
    // it is reset each frame before recording).
    VRET( m_device->CreateCommandList( 0, D3D12_COMMAND_LIST_TYPE_COMPUTE, m_computeCmdAllocator.Get(),
                                       m_computeState.Get(), IID_PPV_ARGS( &m_computeCmdList ) ) );
    DXDebugName( m_computeCmdList );
    VRET( m_computeCmdList->Close() );

    // Create the graphics command list (left open: it records the resource
    // uploads below and is executed at the end of this method).
    VRET( m_device->CreateCommandList( 0, D3D12_COMMAND_LIST_TYPE_DIRECT, m_graphicCmdAllocator.Get(),
                                       m_pipelineState.Get(), IID_PPV_ARGS( &m_graphicCmdList ) ) );
    DXDebugName( m_graphicCmdList );

    // Note: ComPtr's are CPU objects but this resource needs to stay in scope until
    // the command list that references it has finished executing on the GPU.
    // We will flush the GPU at the end of this method to ensure the resource is not
    // prematurely destroyed.
    ComPtr<ID3D12Resource> volumeBufferUploadHeap;

    // Create the volumeBuffer.
    {
        // 4 bytes (RGBA, one UINT8 each) per voxel.
        UINT volumeBufferSize = m_volumeDepth*m_volumeHeight*m_volumeWidth * 4 * sizeof( UINT8 );

        D3D12_RESOURCE_DESC bufferDesc = CD3DX12_RESOURCE_DESC::Buffer( volumeBufferSize, D3D12_RESOURCE_FLAG_ALLOW_UNORDERED_ACCESS );
        D3D12_RESOURCE_DESC uploadBufferDesc = CD3DX12_RESOURCE_DESC::Buffer( volumeBufferSize );

        // Default-heap destination, starts in COPY_DEST for the upload below.
        VRET( m_device->CreateCommittedResource(&CD3DX12_HEAP_PROPERTIES( D3D12_HEAP_TYPE_DEFAULT ),D3D12_HEAP_FLAG_NONE,
                                                &bufferDesc,D3D12_RESOURCE_STATE_COPY_DEST,nullptr,IID_PPV_ARGS( &m_volumeBuffer ) ) );

        const UINT64 uploadBufferSize = GetRequiredIntermediateSize( m_volumeBuffer.Get(), 0, 1 );

        // Create the GPU upload buffer.
        VRET( m_device->CreateCommittedResource(&CD3DX12_HEAP_PROPERTIES( D3D12_HEAP_TYPE_UPLOAD ),D3D12_HEAP_FLAG_NONE,
                                                &uploadBufferDesc,D3D12_RESOURCE_STATE_GENERIC_READ, nullptr,IID_PPV_ARGS( &volumeBufferUploadHeap ) ) );

        // Copy data to the intermediate upload heap and then schedule a copy
        // from the upload heap to the Texture2D.
        // Procedurally fill the voxel colors on the CPU: color band chosen by
        // distance from the volume center, scaled into the colVal palette.
        // NOTE(review): malloc result is unchecked — a huge volume size would
        // crash on the memset below.
        UINT8* volumeBuffer = ( UINT8* ) malloc( volumeBufferSize );
        memset( volumeBuffer, 64, volumeBufferSize );

        //float radius = m_volumeHeight / 2.f;
        float a = m_volumeWidth / 2.f;
        float b = m_volumeHeight / 2.f;
        float c = m_volumeDepth / 2.f;
        // radius of the bounding sphere through the volume's corners
        float radius = sqrt( a*a + b*b + c*c );

        for ( UINT z = 0; z < m_volumeDepth; z++ )
            for ( UINT y = 0; y < m_volumeHeight; y++ )
                for ( UINT x = 0; x < m_volumeWidth; x++ )
                {
                    // coordinates relative to the volume center
                    float _x = x - m_volumeWidth / 2.f;
                    float _y = y - m_volumeHeight / 2.f;
                    float _z = z - m_volumeDepth / 2.f;
                    //float currentRaidus =abs(_x)+abs(_y)+abs(_z);
                    float currentRaidus = sqrt( _x*_x + _y*_y + _z*_z );
                    // scale in [0,3): selects palette index idx in 2..4 and a
                    // repeating 1..192 intensity ramp within each band
                    float scale = currentRaidus *3.f / radius;
                    UINT idx = 4 - (UINT)floor( scale );
                    UINT interm = ( UINT ) ( 192 * scale +0.5f );
                    UINT8 col = interm % 192+1;
                    volumeBuffer[( x + y*m_volumeWidth + z*m_volumeHeight*m_volumeWidth ) * 4 + 0] += col * m_constantBufferData.colVal[idx].x;
                    volumeBuffer[( x + y*m_volumeWidth + z*m_volumeHeight*m_volumeWidth ) * 4 + 1] += col * m_constantBufferData.colVal[idx].y;
                    volumeBuffer[( x + y*m_volumeWidth + z*m_volumeHeight*m_volumeWidth ) * 4 + 2] += col * m_constantBufferData.colVal[idx].z;
                    volumeBuffer[( x + y*m_volumeWidth + z*m_volumeHeight*m_volumeWidth ) * 4 + 3] = m_constantBufferData.colVal[idx].w;
                }

        D3D12_SUBRESOURCE_DATA volumeBufferData = {};
        volumeBufferData.pData = &volumeBuffer[0];
        volumeBufferData.RowPitch = volumeBufferSize;
        volumeBufferData.SlicePitch = volumeBufferData.RowPitch;

        // Record the upload and transition the buffer for compute-shader access.
        UpdateSubresources( m_graphicCmdList.Get(), m_volumeBuffer.Get(), volumeBufferUploadHeap.Get(), 0, 0, 1, &volumeBufferData );
        m_graphicCmdList->ResourceBarrier( 1, &CD3DX12_RESOURCE_BARRIER::Transition( m_volumeBuffer.Get(),
                                           D3D12_RESOURCE_STATE_COPY_DEST, D3D12_RESOURCE_STATE_UNORDERED_ACCESS ) );

        // Describe and create a SRV for the volumeBuffer.
        D3D12_SHADER_RESOURCE_VIEW_DESC srvDesc = {};
        srvDesc.Shader4ComponentMapping = D3D12_DEFAULT_SHADER_4_COMPONENT_MAPPING;
        srvDesc.Format = DXGI_FORMAT_UNKNOWN;
        srvDesc.ViewDimension = D3D12_SRV_DIMENSION_BUFFER;
        srvDesc.Buffer.FirstElement = 0;
        srvDesc.Buffer.NumElements = m_volumeDepth*m_volumeHeight*m_volumeWidth;
        srvDesc.Buffer.StructureByteStride = 4 * sizeof( UINT8 );
        srvDesc.Buffer.Flags = D3D12_BUFFER_SRV_FLAG_NONE;
        // Descriptor slot chosen by root-parameter index within the shared heap.
        CD3DX12_CPU_DESCRIPTOR_HANDLE srvHandle( m_cbvsrvuavHeap->GetCPUDescriptorHandleForHeapStart(), RootParameterSRV, m_cbvsrvuavDescriptorSize );
        m_device->CreateShaderResourceView( m_volumeBuffer.Get(), &srvDesc, srvHandle );

        // Describe and create a UAV for the volumeBuffer.
        D3D12_UNORDERED_ACCESS_VIEW_DESC uavDesc = {};
        uavDesc.Format = DXGI_FORMAT_UNKNOWN;
        uavDesc.ViewDimension = D3D12_UAV_DIMENSION_BUFFER;
        uavDesc.Buffer.FirstElement = 0;
        uavDesc.Buffer.NumElements = m_volumeWidth*m_volumeHeight*m_volumeDepth;
        uavDesc.Buffer.StructureByteStride = 4 * sizeof( UINT8 );
        uavDesc.Buffer.CounterOffsetInBytes = 0;
        uavDesc.Buffer.Flags = D3D12_BUFFER_UAV_FLAG_NONE;
        CD3DX12_CPU_DESCRIPTOR_HANDLE uavHandle( m_cbvsrvuavHeap->GetCPUDescriptorHandleForHeapStart(), RootParameterUAV, m_cbvsrvuavDescriptorSize );
        m_device->CreateUnorderedAccessView( m_volumeBuffer.Get(), nullptr, &uavDesc, uavHandle );

        // CPU copy is no longer needed once the upload is recorded.
        free( volumeBuffer );
    }

    // Create the vertex buffer.
    // Note: ComPtr's are CPU objects but this resource needs to stay in scope until
    // the command list that references it has finished executing on the GPU.
    // We will flush the GPU at the end of this method to ensure the resource is not
    // prematurely destroyed.
    ComPtr<ID3D12Resource> vertexBufferUpload;
    {
        // Define the geometry for a triangle.
        // (Actually the 8 corners of a 256-unit cube centered at the origin.)
        Vertex cubeVertices[] =
        {
            { XMFLOAT3( -128.f, -128.f, -128.f ) },
            { XMFLOAT3( -128.f, -128.f, 128.f ) },
            { XMFLOAT3( -128.f, 128.f, -128.f ) },
            { XMFLOAT3( -128.f, 128.f, 128.f ) },
            { XMFLOAT3( 128.f, -128.f, -128.f )},
            { XMFLOAT3( 128.f, -128.f, 128.f )},
            { XMFLOAT3( 128.f, 128.f, -128.f )},
            { XMFLOAT3( 128.f, 128.f, 128.f )},
        };

        const UINT vertexBufferSize = sizeof( cubeVertices );

        VRET( m_device->CreateCommittedResource( &CD3DX12_HEAP_PROPERTIES( D3D12_HEAP_TYPE_UPLOAD ), D3D12_HEAP_FLAG_NONE,
                                                 &CD3DX12_RESOURCE_DESC::Buffer( vertexBufferSize ), D3D12_RESOURCE_STATE_GENERIC_READ,
                                                 nullptr, IID_PPV_ARGS( &vertexBufferUpload ) ) );
        VRET( m_device->CreateCommittedResource( &CD3DX12_HEAP_PROPERTIES( D3D12_HEAP_TYPE_DEFAULT ), D3D12_HEAP_FLAG_NONE,
                                                 &CD3DX12_RESOURCE_DESC::Buffer( vertexBufferSize ), D3D12_RESOURCE_STATE_COPY_DEST,
                                                 nullptr, IID_PPV_ARGS( &m_vertexBuffer ) ) );
        DXDebugName( m_vertexBuffer );

        D3D12_SUBRESOURCE_DATA vertexData = {};
        vertexData.pData = reinterpret_cast< UINT8* >( cubeVertices );
        vertexData.RowPitch = vertexBufferSize;
        vertexData.SlicePitch = vertexBufferSize;

        UpdateSubresources<1>( m_graphicCmdList.Get(), m_vertexBuffer.Get(), vertexBufferUpload.Get(), 0, 0, 1, &vertexData );
        m_graphicCmdList->ResourceBarrier( 1, &CD3DX12_RESOURCE_BARRIER::Transition( m_vertexBuffer.Get(),
                                           D3D12_RESOURCE_STATE_COPY_DEST, D3D12_RESOURCE_STATE_VERTEX_AND_CONSTANT_BUFFER ));

        // Initialize the vertex buffer view.
        m_vertexBufferView.BufferLocation = m_vertexBuffer->GetGPUVirtualAddress();
        m_vertexBufferView.StrideInBytes = sizeof( Vertex );
        m_vertexBufferView.SizeInBytes = vertexBufferSize;
    }

    // Create the index buffer
    // Note: ComPtr's are CPU objects but this resource needs to stay in scope until
    // the command list that references it has finished executing on the GPU.
    // We will flush the GPU at the end of this method to ensure the resource is not
    // prematurely destroyed.
    ComPtr<ID3D12Resource> indexBufferUpload;
    {
        // 12 triangles (2 per cube face), indexing the 8 vertices above.
        uint16_t cubeIndices[] =
        {
            0,2,1, 1,2,3, 4,5,6, 5,7,6, 0,1,5, 0,5,4, 2,6,7, 2,7,3, 0,4,6, 0,6,2, 1,3,7, 1,7,5,
        };

        const UINT indexBufferSize = sizeof( cubeIndices );

        VRET( m_device->CreateCommittedResource( &CD3DX12_HEAP_PROPERTIES( D3D12_HEAP_TYPE_UPLOAD ), D3D12_HEAP_FLAG_NONE,
                                                 &CD3DX12_RESOURCE_DESC::Buffer( indexBufferSize ), D3D12_RESOURCE_STATE_GENERIC_READ,
                                                 nullptr, IID_PPV_ARGS( &indexBufferUpload ) ) );
        VRET( m_device->CreateCommittedResource( &CD3DX12_HEAP_PROPERTIES( D3D12_HEAP_TYPE_DEFAULT ), D3D12_HEAP_FLAG_NONE,
                                                 &CD3DX12_RESOURCE_DESC::Buffer( indexBufferSize ), D3D12_RESOURCE_STATE_COPY_DEST,
                                                 nullptr, IID_PPV_ARGS( &m_indexBuffer ) ) );
        DXDebugName( m_indexBuffer );

        D3D12_SUBRESOURCE_DATA indexData = {};
        indexData.pData = reinterpret_cast< UINT8* >( cubeIndices );
        indexData.RowPitch = indexBufferSize;
        indexData.SlicePitch = indexBufferSize;

        UpdateSubresources<1>( m_graphicCmdList.Get(), m_indexBuffer.Get(), indexBufferUpload.Get(), 0, 0, 1, &indexData );
        m_graphicCmdList->ResourceBarrier( 1, &CD3DX12_RESOURCE_BARRIER::Transition( m_indexBuffer.Get(),
                                           D3D12_RESOURCE_STATE_COPY_DEST, D3D12_RESOURCE_STATE_INDEX_BUFFER ) );

        m_indexBufferView.BufferLocation = m_indexBuffer->GetGPUVirtualAddress();
        m_indexBufferView.SizeInBytes = sizeof( cubeIndices );
        m_indexBufferView.Format = DXGI_FORMAT_R16_UINT;
    }

    // Create the constant buffer
    {
        // 64 KB upload-heap buffer (the minimum resource-heap granularity).
        VRET( m_device->CreateCommittedResource( &CD3DX12_HEAP_PROPERTIES( D3D12_HEAP_TYPE_UPLOAD ), D3D12_HEAP_FLAG_NONE,
                                                 &CD3DX12_RESOURCE_DESC::Buffer( 1024 * 64 ), D3D12_RESOURCE_STATE_GENERIC_READ,
                                                 nullptr, IID_PPV_ARGS( &m_constantBuffer ) ) );
        DXDebugName( m_constantBuffer );

        // Describe and create a constant buffer view.
        D3D12_CONSTANT_BUFFER_VIEW_DESC cbvDesc = {};
        cbvDesc.BufferLocation = m_constantBuffer->GetGPUVirtualAddress();
        cbvDesc.SizeInBytes = ( sizeof( ConstantBuffer ) + 255 ) & ~255;    // CB size is required to be 256-byte aligned.
        m_device->CreateConstantBufferView( &cbvDesc, m_cbvsrvuavHeap->GetCPUDescriptorHandleForHeapStart() );

        // Initialize and map the constant buffers. We don't unmap this until the
        // app closes. Keeping things mapped for the lifetime of the resource is okay.
        CD3DX12_RANGE readRange( 0, 0 );        // We do not intend to read from this resource on the CPU.
        VRET( m_constantBuffer->Map( 0, &readRange, reinterpret_cast< void** >( &m_pCbvDataBegin ) ) );
        memcpy( m_pCbvDataBegin, &m_constantBufferData, sizeof( m_constantBufferData ) );
    }

    // Close the command list and execute it to begin the initial GPU setup.
    VRET( m_graphicCmdList->Close() );
    ID3D12CommandList* ppCommandLists[] = { m_graphicCmdList.Get() };
    m_graphicCmdQueue->ExecuteCommandLists( _countof( ppCommandLists ), ppCommandLists );

    // Create synchronization objects and wait until assets have been uploaded to the GPU.
    {
        VRET( m_device->CreateFence( 0, D3D12_FENCE_FLAG_NONE, IID_PPV_ARGS( &m_fence ) ) );
        DXDebugName( m_fence );

        m_fenceValue = 1;

        // Create an event handle to use for frame synchronization.
        m_fenceEvent = CreateEvent( nullptr, FALSE, FALSE, nullptr );
        if ( m_fenceEvent == nullptr )
        {
            VRET( HRESULT_FROM_WIN32( GetLastError() ) );
        }

        // Wait for the command list to execute; we are reusing the same command
        // list in our main loop but for now, we just want to wait for setup to
        // complete before continuing.
        WaitForGraphicsCmd();
    }

    // Initial camera: looking at the origin from a corner, RMB-driven movement.
    XMVECTORF32 vecEye = { 500.0f, 500.0f, -500.0f };
    XMVECTORF32 vecAt = { 0.0f, 0.0f, 0.0f };
    m_camera.SetViewParams( vecEye, vecAt );
    m_camera.SetEnablePositionMovement( true );
    m_camera.SetButtonMasks( MOUSE_RIGHT_BUTTON, MOUSE_WHEEL, MOUSE_LEFT_BUTTON );

    return S_OK;
}
/** * handles all time commands. * @return false when errors, true otherwise */ bool krnlSYStimeCommand(SYStimeCommand command) { bool result = true; struct tm *sTime; struct timeb currTime; if (command != IDLE) { PRINT1("SYStime Command: %u\n", command); } switch (command) { case IDLE: break; case READTIME: ftime(&currTime); sTime = localtime(&currTime.time); pUIWorkspace[mm_SYStimeYear] = sTime->tm_year; pUIWorkspace[mm_SYStimeMonth] = sTime->tm_mon; pUIWorkspace[mm_SYStimeDay] = sTime->tm_mday; pUIWorkspace[mm_SYStimeDayOfWeek] = sTime->tm_wday; pUIWorkspace[mm_SYStimeHour] = sTime->tm_hour; pUIWorkspace[mm_SYStimeMinute] = sTime->tm_min; pUIWorkspace[mm_SYStimeSecond] = sTime->tm_sec; pUIWorkspace[mm_SYStimeMilliSeconds] = currTime.millitm; break; case READUTCTIME: ftime(&currTime); sTime = gmtime(&currTime.time); pUIWorkspace[mm_SYStimeYear] = sTime->tm_year; pUIWorkspace[mm_SYStimeMonth] = sTime->tm_mon; pUIWorkspace[mm_SYStimeDay] = sTime->tm_mday; pUIWorkspace[mm_SYStimeDayOfWeek] = sTime->tm_wday; pUIWorkspace[mm_SYStimeHour] = sTime->tm_hour; pUIWorkspace[mm_SYStimeMinute] = sTime->tm_min; pUIWorkspace[mm_SYStimeSecond] = sTime->tm_sec; pUIWorkspace[mm_SYStimeMilliSeconds] = currTime.millitm; break; case READCOUNTS: /* clock_t counts = clock(); */ /* if (counts != -1) { */ /* pUIWorkspace[mm_SYStimeCounts] = counts; */ /* pUIWorkspace[mm_CountsPerMillisecond] = */ /* (unit) CLOCKS_PER_SEC / 1000; */ /* } else { */ /* PRINTERROR("clock"); */ /* result = false; */ /* } */ /* The use of gettimeofday is better than clock, because * clock depends on the number of clock ticks spend for * this application. */ { struct timeval tv; if (gettimeofday(&tv, NULL) != -1) { pUIWorkspace[mm_SYStimeCounts] = tv.tv_sec * 1000000 + tv.tv_usec; pUIWorkspace[mm_CountsPerMillisecond] = 1000; } else { PRINTERROR("gettimeofday"); result = false; } } break; default: result = false; break; } return result; }
/* Called by the AudioFileStream parser when it discovers property values
 * in the stream.  Only kAudioFileStreamProperty_ReadyToProducePackets is
 * handled: it is where the output AudioQueue is created and configured. */
void StreamPropertyListenerProc(void * inClientData,
                                AudioFileStreamID inAudioFileStream,
                                AudioFileStreamPropertyID inPropertyID,
                                UInt32 * ioFlags)
{
    struct audioPlayer* player = (struct audioPlayer*)inClientData;
    OSStatus err = noErr;

    // printf("found property '%c%c%c%c'\n", (inPropertyID>>24)&255, (inPropertyID>>16)&255, (inPropertyID>>8)&255, inPropertyID&255);

    switch (inPropertyID) {
        case kAudioFileStreamProperty_ReadyToProducePackets :
        {
            // The file stream parser is now ready to produce audio packets.
            // Get the stream format first.
            AudioStreamBasicDescription asbd;
            UInt32 asbdSize = sizeof(asbd);
            err = AudioFileStreamGetProperty(inAudioFileStream, kAudioFileStreamProperty_DataFormat, &asbdSize, &asbd);
            if (err) {
                PRINTERROR("get kAudioFileStreamProperty_DataFormat");
                player->failed = true;
                break;
            }

            //TODO: Is this really right!?!
            player->songDuration = player->waith.contentLength * 2000 / asbd.mSampleRate;
            player->samplerate = asbd.mSampleRate;
            player->packetDuration = asbd.mFramesPerPacket / asbd.mSampleRate;

            // create the audio queue
            err = AudioQueueNewOutput(&asbd, PianobarAudioQueueOutputCallback, player, NULL, NULL, 0, &player->audioQueue);
            if (err) {
                PRINTERROR("AudioQueueNewOutput");
                player->failed = true;
                break;
            }

            // allocate audio queue buffers
            for (unsigned int i = 0; i < kNumAQBufs; ++i) {
                err = AudioQueueAllocateBuffer(player->audioQueue, kAQBufSize, &player->audioQueueBuffer[i]);
                if (err) {
                    PRINTERROR("AudioQueueAllocateBuffer");
                    player->failed = true;
                    break;
                }
            }
            // do not keep configuring a half-initialized queue
            if (player->failed)
                break;

            // The magic cookie is optional (MP3 streams usually have none),
            // so any failure in this section must not abort queue setup.
            UInt32 cookieSize;
            Boolean writable;
            err = AudioFileStreamGetPropertyInfo(inAudioFileStream, kAudioFileStreamProperty_MagicCookieData, &cookieSize, &writable);
            if (!err) {
                void* cookieData = calloc(1, cookieSize);
                if (cookieData != NULL) {
                    err = AudioFileStreamGetProperty(inAudioFileStream, kAudioFileStreamProperty_MagicCookieData, &cookieSize, cookieData);
                    if (!err) {
                        // hand the cookie over to the queue
                        err = AudioQueueSetProperty(player->audioQueue, kAudioQueueProperty_MagicCookie, cookieData, cookieSize);
                        if (err)
                            PRINTERROR("set kAudioQueueProperty_MagicCookie");
                    } else {
                        PRINTERROR("get kAudioFileStreamProperty_MagicCookieData");
                    }
                    free(cookieData);
                }
            } else {
                PRINTERROR("info kAudioFileStreamProperty_MagicCookieData");
            }

            // Always register for kAudioQueueProperty_IsRunning, even when
            // no cookie was present; playback-state tracking depends on it.
            err = AudioQueueAddPropertyListener(player->audioQueue, kAudioQueueProperty_IsRunning, AudioQueueIsRunningCallback, player);
            if (err) {
                PRINTERROR("AudioQueueAddPropertyListener");
                player->failed = true;
                break;
            }

            break;
        }
    }
}
/**
 * Handles all net commands.
 * @return false when errors, true otherwise
 */
bool krnlNetCommand(NetCommand command)
{
    bool result = true;
#ifdef pr_network
    int name_len;
    int addr_len;
    int sock = 0;
    socklen_t s_len = 0;
    char *hostname;
    char *hostaddr;
    struct sockaddr_in name;
    struct hostent *hostinfo;
    struct timeval sto;         /* for NETISEXCEPTED, NETISREADABLE and NETISWRITABLE */
    fd_set fds;                 /* for NETISEXCEPTED, NETISREADABLE and NETISWRITABLE */
    unit *client_vector;

    if (command != IDLE) {
        PRINT1("Net Command: %u\n", command);
    }

    switch (command) {
    case IDLE:
        break;
    case GETHOSTBYNAME:
        /* resolve [HostName] and store the dotted quad in [HostAddress] */
        name_len = ustrlen(&pWorkspace[pUIWorkspace[mm_HostName]]);
        hostname = (char *) malloc(name_len + 1);
        utrsstring(hostname, &pWorkspace[pUIWorkspace[mm_HostName]]);
        hostinfo = gethostbyname(hostname);
        free(hostname);
        if (hostinfo == NULL) {
            result = false;
            pUIWorkspace[mm_NetStatus] = (NETFAILURE | READY);
            break;
        }
        memcpy(&(name.sin_addr.s_addr), hostinfo->h_addr, hostinfo->h_length);
        btrsstring((unit *) & pWorkspace[pUIWorkspace[mm_HostAddress]],
                   inet_ntoa(name.sin_addr));
        pUIWorkspace[mm_NetStatus] = (NETSUCCESS | READY);
        break;
    case GETHOSTBYADDR:{
            /* reverse lookup: [HostAddress] -> [HostName] */
            struct in_addr h_addr;
            addr_len = ustrlen(&pWorkspace[pUIWorkspace[mm_HostAddress]]);
            hostaddr = (char *) malloc(addr_len + 1);
            utrsstring(hostaddr, &pWorkspace[pUIWorkspace[mm_HostAddress]]);
            if (inet_aton(hostaddr, &h_addr) == 0) {
                free(hostaddr);
                result = false;
                pUIWorkspace[mm_NetStatus] = (NETFAILURE | READY);
                break;
            }
            free(hostaddr);
            hostinfo = gethostbyaddr(&h_addr, sizeof(h_addr), AF_INET);
            if (hostinfo == NULL) {
                result = false;
                pUIWorkspace[mm_NetStatus] = (NETFAILURE | READY);
                break;
            }
            btrsstring(&pWorkspace[pUIWorkspace[mm_HostName]],
                       hostinfo->h_name);
            pUIWorkspace[mm_NetStatus] = (NETSUCCESS | READY);
            break;
        }
    case GETPEERBYSOCKET:
        s_len = sizeof(name);
        if (getpeername
            ((int) pUIWorkspace[mm_Socket], (struct sockaddr *) &name,
             &s_len) < 0) {
            PRINTERROR("getpeername");
            result = false;
            break;
        }
        if (name.sin_family != AF_INET) {
            result = false;
            break;
        }
        /* getpeername() returns the port in network byte order; convert
         * to host order so it matches the htons() applied in NETCONNECT
         * and NETLISTEN. */
        pUIWorkspace[mm_Port] = ntohs(name.sin_port);
        btrsstring((unit *) & pWorkspace[pUIWorkspace[mm_HostAddress]],
                   inet_ntoa(name.sin_addr));
        break;
    case CANCELREQUEST:
        /* since the GETHOSTBY(NAME)(ADDR) are implemented synchronically,
         * this routine isn't needed, and always returns sucessfully. */
        break;
    case NETOPEN:
        /* create socket */
        sock = socket(PF_INET, SOCK_STREAM, 0);
        if (sock < 0) {
            PRINTERROR("socket");
            result = false;
            break;
        }
        /* set socket options: non-blocking I/O */
        if (fcntl(sock, F_SETFL, fcntl(sock, F_GETFL, 0) | O_NONBLOCK) < 0) {
            PRINTERROR("fcntl");
            result = false;
            break;
        }
        pUIWorkspace[mm_Socket] = sock;
        break;
    case NETCLOSE:{
            int i;
            /* make network command deaf while the client vector is edited
             * (bitwise complement: clear only the GODEAF bit) */
            pUIWorkspace[mm_NetStatus] &= ~GODEAF;
            if (close((int) pUIWorkspace[mm_Socket]) < 0) {
                PRINTERROR("close");
                result = false;
            }
            /* find if socket in client vector */
            client_vector = &pWorkspace[pUIWorkspace[mm_Clients]];
            sock = pUIWorkspace[mm_Socket];
            /* start search; check the bound before reading the element */
            i = 0;
            while ((i < pUIWorkspace[mm_Connections])
                   && (client_vector[i] != sock))
                i++;
            if (i == pUIWorkspace[mm_Connections]) {
                PRINT1("Socket (%u) not in client vector.\n", sock);
                result = false;
            } else {
                /* remove socket from client vector */
                memmove(&client_vector[i], &client_vector[i + 1],
                        (pUIWorkspace[mm_Connections] - i - 1)
                        * sizeof(unit));
                pUIWorkspace[mm_Connections]--;
            }
            pUIWorkspace[mm_NetStatus] |= GODEAF;
            break;
        }
    case NETCONNECT:
        /** in: [Socket]
         *     [HostAddress]
         *     [Port]
         * fails or ends
         *     stat_NETISWRITABLE
         *     stat_NETISEXCEPTED */
        addr_len = ustrlen(&pWorkspace[pUIWorkspace[mm_HostAddress]]);
        hostaddr = (char *) malloc(addr_len + 1);
        utrsstring(hostaddr, &pWorkspace[pUIWorkspace[mm_HostAddress]]);
        if (!(inet_aton(hostaddr, &name.sin_addr))) {
            result = false;
            free(hostaddr);
            break;
        }
        free(hostaddr);
        name.sin_family = AF_INET;
        name.sin_port = htons((uint16_t) pUIWorkspace[mm_Port]);
        /* EINPROGRESS is expected on a non-blocking socket; completion is
         * observed later via NETISWRITABLE / NETISEXCEPTED */
        if (connect
            ((int) pUIWorkspace[mm_Socket], (struct sockaddr *) &name,
             sizeof(name)) == -1 && (errno != EINPROGRESS)) {
            PRINTERROR("connect");
            result = false;
            break;
        }
        break;
    case NETLISTEN:
        /* get the length of the [Host Address] string. */
        addr_len = ustrlen(&pWorkspace[pUIWorkspace[mm_HostAddress]]);
        hostaddr = (char *) malloc(addr_len + 1);
        utrsstring(hostaddr, &pWorkspace[pUIWorkspace[mm_HostAddress]]);
        if (!(inet_aton(hostaddr, &name.sin_addr))) {
            result = false;
            free(hostaddr);
            break;
        }
        free(hostaddr);
        name.sin_family = AF_INET;
        name.sin_port = htons((uint16_t) pUIWorkspace[mm_Port]);
        if (bind
            ((int) pUIWorkspace[mm_Socket], (struct sockaddr *) &name,
             sizeof(name)) < 0) {
            PRINTERROR("bind");
            result = false;
            break;
        }
        if (listen
            ((int) pUIWorkspace[mm_Socket],
             pUIWorkspace[mm_MaxConnections]) < 0) {
            PRINTERROR("listen");
            result = false;
            break;
        }
        /* accept clients on a background thread */
        if (pthread_create(&netThread, NULL, ll_socket_listen, NULL) == 0) {
            PRINT("Net is listening...\n");
        } else {
            result = false;
        }
        break;
    case NETSEND:{
            ssize_t s_size = send((int) pUIWorkspace[mm_Socket],
                                  &pWorkspace[pUIWorkspace
                                              [mm_NetBlockPointer]],
                                  pUIWorkspace[mm_NetBlockSize], 0);
            if (s_size == -1) {
                PRINTERROR("send");
                pUIWorkspace[mm_NetBlockSize] = 0;
                result = false;
                break;
            }
            /* report the number of bytes actually sent */
            pUIWorkspace[mm_NetBlockSize] = s_size;
        }
        break;
    case NETRECV:{
            ssize_t s_size = recv((int) pUIWorkspace[mm_Socket],
                                  &pWorkspace[pUIWorkspace
                                              [mm_NetBlockPointer]],
                                  pUIWorkspace[mm_NetBlockSize], 0);
            if (s_size == -1) {
                PRINTERROR("recv");
                pUIWorkspace[mm_NetBlockSize] = 0;
                result = false;
                break;
            }
            /* report the number of bytes actually received */
            pUIWorkspace[mm_NetBlockSize] = s_size;
        }
        break;
    case NETISREADABLE:
        /* zero timeout: poll readability without blocking */
        sto.tv_sec = 0;
        sto.tv_usec = 0;
        FD_ZERO(&fds);
        FD_SET((int) pUIWorkspace[mm_Socket], &fds);
        if (select
            ((int) pUIWorkspace[mm_Socket] + 1, &fds, NULL, NULL,
             &sto) != 1)
            result = false;
        break;
    case NETISWRITABLE:
        /* zero timeout: poll writability without blocking */
        sto.tv_sec = 0;
        sto.tv_usec = 0;
        FD_ZERO(&fds);
        FD_SET((int) pUIWorkspace[mm_Socket], &fds);
        if (select
            ((int) pUIWorkspace[mm_Socket] + 1, NULL, &fds, NULL,
             &sto) != 1)
            result = false;
        break;
    case NETISEXCEPTED:
        /* zero timeout: poll exceptional conditions without blocking */
        sto.tv_sec = 0;
        sto.tv_usec = 0;
        FD_ZERO(&fds);
        FD_SET((int) pUIWorkspace[mm_Socket], &fds);
        if (select
            ((int) pUIWorkspace[mm_Socket] + 1, NULL, NULL, &fds,
             &sto) != 1)
            result = false;
        break;
    default:
        result = false;
        break;
    }
#else /* #ifndef pr_network */
    result = false;
    PRINT("Net Command not enabled in RTM.\n");
#endif /* pr_network */
    return result;
}
bool loadParams(int argc, char ** argv, std::string& worldFilename, std::string& robotFilename, std::string& objectFilename, std::string& outputDirectory) { worldFilename.clear(); robotFilename.clear(); objectFilename.clear(); outputDirectory.clear(); boost::program_options::variables_map vm; try { vm = loadParams(argc, argv); } catch (std::exception const& e) { PRINTERROR("Exception caught: " << e.what()); return false; } catch (...) { PRINTERROR("Exception caught"); return false; } boost::program_options::options_description desc = getOptions(); // desc=getOptions(); if (vm.count("help")) { PRINTMSG(desc); return false; } if (vm.count("dir") < 1) { PRINTERROR("Must specify an output directory"); PRINTMSG(desc); return false; } if (vm.count("wld") && (vm.count("rob") || vm.count("obj"))) { PRINTERROR("Cannot specify a world and a robot and/or object at the same time."); PRINTMSG(desc); return false; } if (!vm.count("wld") && !vm.count("rob")) { PRINTERROR("Have to specify either a robot or a world."); PRINTMSG(desc); return false; } if (vm.count("rob") != vm.count("obj")) { PRINTERROR("If you specify a robot, you also have to specify an object, and vice versa."); PRINTMSG(desc); return false; } if (vm.count("rob") > 1) { PRINTERROR("You can only specify one robot at this stage."); PRINTMSG(desc); return false; } if (vm.count("obj") > 1) { PRINTERROR("You can only specify one object at this stage."); PRINTMSG(desc); return false; } if (vm.count("obj") != vm.count("rob")) { PRINTERROR("If you specify a robot, you should also specify an object."); PRINTMSG(desc); return false; } if (vm.count("wld")) { worldFilename = vm["wld"].as<std::string>(); PRINTMSG("World file is " << worldFilename); } if (vm.count("rob")) { robotFilename = vm["rob"].as<std::string>(); PRINTMSG("Robot file is " << robotFilename); } if (vm.count("obj")) { objectFilename = vm["obj"].as<std::string>(); PRINTMSG("Object file is " << objectFilename); } if (vm.count("dir")) { outputDirectory = 
vm["dir"].as<std::string>(); PRINTMSG("Output dir is " << outputDirectory); } return true; }
int main(int argc, char **argv) { signal(SIGSEGV, handler); signal(SIGABRT, handler); PRINT_INIT_STD(); std::string worldFilename; std::string robotFilename; std::string objectFilename; std::string outputDirectory; if (!loadParams(argc, argv, worldFilename, robotFilename, objectFilename, outputDirectory)) { return 1; } PRINTMSG("Creating planner"); std::string name = "EigenGraspPlanner1"; // TODO make parameter SHARED_PTR<GraspIt::GraspItSceneManager> graspitMgr(new GraspIt::GraspItSceneManagerNoGui()); #ifdef USE_EIGENGRASP_NOQT SHARED_PTR<GraspIt::EigenGraspPlannerNoQt> p(new GraspIt::EigenGraspPlannerNoQt(name, graspitMgr)); #else SHARED_PTR<GraspIt::EigenGraspPlanner> p(new GraspIt::EigenGraspPlanner(name, graspitMgr)); #endif if (!worldFilename.empty()) { PRINTMSG("Loading world"); graspitMgr->loadWorld(worldFilename); } else { // TODO add an option to set the transforms. // For now, they're put in the origin. For the planning, this should not really matter... GraspIt::EigenTransform robotTransform; GraspIt::EigenTransform objectTransform; robotTransform.setIdentity(); objectTransform.setIdentity(); // objectTransform.translate(Eigen::Vector3d(100,0,0)); std::string robotName("Robot1"); // TODO parameterize std::string objectName("Object1"); if ((graspitMgr->loadRobot(robotFilename, robotName, robotTransform) != 0) || (graspitMgr->loadObject(objectFilename, objectName, true, objectTransform))) { PRINTERROR("Could not load robot or object"); return 1; } // in case one wants to view the initial world before planning, save it: graspitMgr->saveGraspItWorld(outputDirectory + "/worlds/startWorld.xml"); graspitMgr->saveInventorWorld(outputDirectory + "/worlds/startWorld.iv"); } // now save the world again as inventor file, to test // p->saveIVWorld("test.iv"); int maxPlanningSteps = 50000; int repeatPlanning = 1; int keepMaxPlanningResults = 3; bool finishWithAutograsp = false; p->plan(maxPlanningSteps, repeatPlanning, keepMaxPlanningResults, finishWithAutograsp); 
PRINTMSG("Saving results as world files"); bool createDir = true; bool saveIV = true; bool saveWorld = true; std::string resultsWorldDirectory = outputDirectory; std::string filenamePrefix = "world"; p->saveResultsAsWorldFiles(resultsWorldDirectory, filenamePrefix, saveWorld, saveIV, createDir); std::vector<GraspIt::EigenGraspResult> allGrasps; p->getResults(allGrasps); PRINTMSG("Grasp results:"); std::vector<GraspIt::EigenGraspResult>::iterator it; for (it = allGrasps.begin(); it != allGrasps.end(); ++it) { PRINTMSG(*it); } PRINTMSG("Quitting program."); return 1; }
/**
 * Requests grasp planning for a robot/object pair from the GraspIt
 * ROS planning service and saves (or prints) the resulting grasps.
 * argv[1] = robot ID, argv[2] = object ID, argv[3] = optional output dir.
 * @return 0 on success or usage display, 1 on a service error
 */
int run(int argc, char **argv)
{
    if (argc < 3)
    {
        printHelp(argv[0]);
        return 0;
    }

    std::string outDir;
    if (argc >= 4)
    {
        outDir = std::string(argv[3]);
        if (!makeDirectoryIfNeeded(outDir))
        {
            PRINTERROR("Could not create directory "<<outDir);
            return 0;
        }
    }

    if (outDir.empty())
    {
        PRINTWARN("No output path configured, will print results on screen only.");
        printHelp(argv[0]);
    }

    const std::string robotId(argv[1]);
    const std::string objectId(argv[2]);
    PRINTMSG("Planning for robot ID=" << robotId << " to grasp object ID=" << objectId);

    // TODO parameterize this
    std::string egPlanningTopic = "graspit_eg_planning";

    ros::NodeHandle node;
    ros::ServiceClient planningClient =
        node.serviceClient<manipulation_msgs::GraspPlanning>(egPlanningTopic);

    // A database client could be queried for object type information here.
    // The planning request does not use it yet (the service looks the type
    // up itself), so arbitrary placeholder values are fine.
    object_recognition_msgs::ObjectType modelType;
    modelType.key = "NotAvailabeYet";
    modelType.db = "SimpleGraspItDatabase";

    // A different pose for the object in the graspit world can be set here.
    // frame_id "0" keeps the object's current pose in the world; "1" would
    // take the pose from the field below instead.
    geometry_msgs::PoseStamped poseInWorld;
    poseInWorld.header.frame_id = "0";
    /*
    poseInWorld.header.frame_id="1";
    poseInWorld.pose.orientation.w=1;
    poseInWorld.pose.position.x=100;
    */

    household_objects_database_msgs::DatabaseModelPose model;
    model.model_id = atoi(objectId.c_str());  // TODO move away from atoi at some stage
    model.type = modelType;
    model.pose = poseInWorld;
    model.confidence = 1;
    model.detector_name = "manual_detection";

    manipulation_msgs::GraspableObject target;
    // The reference frame could be one that is relative to all fields (e.g.
    // cluster and all potential models). However at the moment, the graspit
    // planner only supports the global frame (the graspit origin). No tf
    // transforms are considered in the GraspIt planner service yet.
    target.reference_frame_id = model.pose.header.frame_id;
    target.potential_models.push_back(model);
    // target.cluster = we will not provide a point cloud
    // target.region = and not the SceneRegion along with it either.
    // target.collision_name = could think about whether providing this as parameter too

    manipulation_msgs::GraspPlanning planSrv;
    planSrv.request.arm_name = robotId;
    planSrv.request.target = target;
    planSrv.request.collision_object_name = target.collision_name;
    // planSrv.request.collision_support_surface_name = will not provide this here
    // planSrv.request.grasps_to_evaluate = no grasps to evaluate with this client
    // planSrv.request.movable_obstacles = this is not supported by this client

    if (!planningClient.call(planSrv))
    {
        PRINTERROR("Failed to call service");
        return 1;
    }

    if (planSrv.response.error_code.value != manipulation_msgs::GraspPlanningErrorCode::SUCCESS)
    {
        PRINTERROR("Could do the grasp planning. Error code " << planSrv.response.error_code.value);
        return 1;
    }

    PRINTMSG("Successfully finished grasp planning. Have " << planSrv.response.grasps.size() << " resulting grasps.");

    int graspNum = 1;
    for (std::vector<manipulation_msgs::Grasp>::iterator g = planSrv.response.grasps.begin();
         g != planSrv.response.grasps.end(); ++g)
    {
        if (outDir.empty())
        {
            // no output directory: just print each grasp
            PRINTMSG(*g);
            continue;
        }
        std::stringstream filename;
        filename<<outDir<<"/Grasp_"<<graspNum<<".msg";
        std::stringstream filename_txt;
        filename_txt<<outDir<<"/Grasp_"<<graspNum<<"_string.msg";
        ++graspNum;
        // binary form is required; the text form is best-effort
        if (!saveToFile(*g, filename.str(), true))
        {
            PRINTERROR("Could not save to file "<<filename.str());
            continue;
        }
        saveToFile(*g, filename_txt.str(), false);
    }

    return 0;
}