void OCLPerfDoubleDMA::close() { for (unsigned int i = 0; i < buffers().size(); ++i) { error_ = clReleaseMemObject(buffers()[i]); } buffers_.clear(); if (kernel_ != 0) { error_ = clReleaseKernel(kernel_); } if (program_ != 0) { error_ = clReleaseProgram(program_); } if (context_) { error_ = clReleaseContext(context_); } if (devices_) { delete [] devices_; } if(hostPtr_) { delete hostPtr_; } }
// Constructs a unicap-backed buffer manager: caches the camera parameters,
// builds the new-frame callback bookkeeping, registers the callback with
// the unicap device, and starts the capture stream.
// Throws Miro::Exception if capture cannot be started.
BufferManagerUnicap::BufferManagerUnicap(Filter const * const _filter,
                                         unsigned int _buffers,
                                         unsigned int _bufferSize,
                                         unicap_handle_t _handle,
                                         unicap_format_t _format) throw (std::bad_alloc) :
  Super(_filter, _buffers, _bufferSize, NULL),
  handle_(_handle),
  format_(_format)
{
  // get device parameters
  // NOTE(review): the dynamic_cast result is dereferenced without a null
  // check; if _filter->parameters() is not a DeviceParameters this
  // dereferences NULL — confirm the caller guarantees the concrete type.
  const Video::DeviceParameters * devParams =
    dynamic_cast<DeviceParameters const *>(_filter->parameters());
  camParams_ = &devParams->camera;
  MIRO_LOG_OSTR(LL_NOTICE, "\n camera parameters:\n" << *camParams_);

  // init callback structure: mutex/condition protect the frame index,
  // buffer[] mirrors the base class's buffer addresses so the C callback
  // can write directly into them.
  callback_.index = 0;
  callback_.mutex = new Miro::Mutex();
  callback_.condition = new Miro::Condition(*callback_.mutex);
  callback_.buffer = new unsigned char * [buffers()];
  for (unsigned int i=0; i<buffers(); ++i)
    callback_.buffer[i] = bufferAddr(i);

  // register callback for each newly captured frame
  unicap_register_callback(handle_,
                           UNICAP_EVENT_NEW_FRAME,
                           (unicap_callback_t) newFrameCallback,
                           (void*)&callback_);

  // Start the capture process on the device
  if( !SUCCESS( unicap_start_capture( handle_ ) ) )
    throw Miro::Exception("DeviceUnicap: Failed to start capture on unicap device");
}
// Reslice client: loads a stack of numbered TIFF slices, extracts one row
// from every slice, and either dumps the (x, z, value) triples to stdout
// or saves the resliced plane as a new buffer file.
Reslice::Reslice(int argc, char** argv)
{
    std::cerr << "Reslice Client\n";
    // Command-line parsing failure aborts construction silently.
    if (!CL::parse(argc, argv, "Slice"))
        return;
    if (verbosity >= 1)
        std::cerr << buildString() << '\n';

    Buffer2DInfo info;
    std::string description;
    // Load slice 0 first to establish the reference geometry; every
    // further slice must match it exactly.
    std::vector<CBuffer2D> buffers(numSlices);
    buffers[0].load(inName + "0.tiff", info, description);
    for (size_t i = 1; i < static_cast<size_t>(numSlices); ++i) {
        Buffer2DInfo newInfo;
        buffers[i].load(inName + std::to_string(i) + ".tiff", newInfo, description);
        if (info != newInfo)
            AURORA_THROW(EInvalidParameter, "Buffers do not match");
    }

    const int64_t cols = info.cols();
    const int64_t rows = info.rows();
    // Default row: the vertical middle of the slices.
    if (-1 == row)
        row.setValue(rows / 2);
    std::cerr << "Row: " << row << '\n';

    if ("NOTSET" == outName) {
        std::cout << "# x z value\n";
        // output to std::out
        for (int64_t z = 0; z < numSlices; ++z) {
            std::cout << '\n';
            for (int64_t x = 0; x < cols; ++x) {
                std::cout << x << ' ' << z << ' '
                          << buffers[static_cast<size_t>(z)].pixel(x, row).real() << '\n';
            }
        }
    } else {
        // Build a (cols x numSlices) image: one output row per slice.
        CBuffer2D result(cols, numSlices);
        for (int64_t z = 0; z < numSlices; ++z)
            for (int64_t x = 0; x < cols; ++x)
                result.pixel(x, z) = buffers[static_cast<size_t>(z)].pixel(x, row);
        Buffer2DInfo resultInfo(cols, numSlices, info.width(), numSlices, info.energy());
        result.save(outName, resultInfo, "");
    }
}
// Splits the input buffer into per-cell buffers on a square grid of side
// m_length anchored at the first point. Returns the set of non-empty cells.
PointBufferSet Splitter::run(PointBufferPtr buf)
{
    PointBufferSet pbSet;
    if (!buf->size())
        return pbSet;

    CoordCompare compare;
    std::map<Coord, PointBufferPtr, CoordCompare> buffers(compare);

    // Use the location of the first point as the origin.
    double xOrigin = buf->getFieldAs<double>(Dimension::Id::X, 0);
    double yOrigin = buf->getFieldAs<double>(Dimension::Id::Y, 0);

    // Floor-divide a coordinate into its grid cell. A plain cast to int
    // truncates toward zero, which would fold the cells on either side of
    // the origin (e.g. -0.5 and +0.5) into the same index 0.
    struct
    {
        int operator()(double pos) const
        {
            int cell = static_cast<int>(pos);
            if (pos < 0 && pos != static_cast<double>(cell))
                --cell;
            return cell;
        }
    } floorCell;

    // Overlay a grid of squares on the points (m_length sides). Each square
    // corresponds to a new point buffer. Place the points falling in the
    // each square in the corresponding point buffer.
    for (PointId idx = 0; idx < buf->size(); idx++)
    {
        int xpos = floorCell(
            (buf->getFieldAs<double>(Dimension::Id::X, idx) - xOrigin) / m_length);
        int ypos = floorCell(
            (buf->getFieldAs<double>(Dimension::Id::Y, idx) - yOrigin) / m_length);
        Coord loc(xpos, ypos);

        // Create the cell's buffer lazily on first use.
        PointBufferPtr& outbuf = buffers[loc];
        if (!outbuf)
            outbuf = buf->makeNew();
        outbuf->appendPoint(*buf, idx);
    }

    // Pull the buffers out of the map and stick them in the standard
    // output set, setting the bounds as we go.
    for (auto bi = buffers.begin(); bi != buffers.end(); ++bi)
        pbSet.insert(bi->second);
    return pbSet;
}
// snapshot taken callback // "size" is the width and height of yuv picture for registerBuffer. // If it is NULL, use the picture size from parameters. void CameraService::Client::handleShutter(image_rect_type *size #ifdef BOARD_USE_CAF_LIBCAMERA , bool playShutterSoundOnly #endif ) { #ifdef BOARD_USE_CAF_LIBCAMERA if(playShutterSoundOnly) { #endif mCameraService->playSound(SOUND_SHUTTER); #ifdef BOARD_USE_CAF_LIBCAMERA sp<ICameraClient> c = mCameraClient; if (c != 0) { mLock.unlock(); c->notifyCallback(CAMERA_MSG_SHUTTER, 0, 0); } return; } #endif // Screen goes black after the buffer is unregistered. if (mSurface != 0 && !mUseOverlay) { mSurface->unregisterBuffers(); } sp<ICameraClient> c = mCameraClient; if (c != 0) { mLock.unlock(); c->notifyCallback(CAMERA_MSG_SHUTTER, 0, 0); if (!lockIfMessageWanted(CAMERA_MSG_SHUTTER)) return; } disableMsgType(CAMERA_MSG_SHUTTER); // It takes some time before yuvPicture callback to be called. // Register the buffer for raw image here to reduce latency. if (mSurface != 0 && !mUseOverlay) { int w, h; CameraParameters params(mHardware->getParameters()); if (size == NULL) { params.getPictureSize(&w, &h); } else { w = size->width; h = size->height; w &= ~1; h &= ~1; LOG1("Snapshot image width=%d, height=%d", w, h); } // FIXME: don't use hardcoded format constants here ISurface::BufferHeap buffers(w, h, w, h, HAL_PIXEL_FORMAT_YCrCb_420_SP, mOrientation, 0, mHardware->getRawHeap()); mSurface->registerBuffers(buffers); IPCThreadState::self()->flushCommands(); } mLock.unlock(); }
void callMixAudioBufferLogrithmicDRC(std::vector<AudioBuffer> const &audioBuffers, AudioBuffer &mixBuffer, unsigned int samples, float threshold) { std::vector<_Type *> buffers(audioBuffers.size()); for(size_t i=0; i<audioBuffers.size(); ++i) buffers[i]=(_Type *)audioBuffers[i].getBuffer(); mixLogrithmicDRC<_Type>(buffers, (_Type *)mixBuffer.getBuffer(), samples, threshold); }
std::vector<Renderbuffer> makeRenderbufferD1C3(uint32_t width, uint32_t height) { std::vector<Renderbuffer> buffers(4); buffers[0].init(GL_DEPTH_COMPONENT32, width, height, GL_DEPTH_ATTACHMENT); buffers[1].init(GL_RGBA8, width, height, GL_COLOR_ATTACHMENT0); buffers[2].init(GL_RGBA8, width, height, GL_COLOR_ATTACHMENT1); buffers[3].init(GL_RGBA8, width, height, GL_COLOR_ATTACHMENT2); return buffers; }
void callMixLinearAttenuation(std::vector<AudioBuffer> const &audioBuffers, AudioBuffer &mixBuffer, unsigned int samples) { std::vector<_Type *> buffers(audioBuffers.size()); for(size_t i=0; i<audioBuffers.size(); ++i) buffers[i]=(_Type *)audioBuffers[i].getBuffer(); mixLinearAttenuation<_Type>(buffers, (_Type *)mixBuffer.getBuffer(), samples); }
void setResults( const MySqlPreparedStatement& statement, std::vector<std::tuple<Args...>>* const results ) { OutputBinderPrivate::Friend::throwIfParameterCountWrong( sizeof...(Args), statement ); std::vector<MYSQL_BIND> parameters(statement.getFieldCount()); std::vector<std::vector<char>> buffers(statement.getFieldCount()); std::vector<mysql_bind_length_t> lengths(statement.getFieldCount()); std::vector<my_bool> nullFlags(statement.getFieldCount()); // bindParameters needs to know the type of the tuples, and it does this by // taking an example tuple, so just create a dummy // TODO(bskari|2013-03-17) There has to be a better way than this std::tuple<Args...> unused; bindParameters( unused, ¶meters, &buffers, &nullFlags, OutputBinderPrivate::int_<sizeof...(Args) - 1>{}); for (size_t i = 0; i < statement.getFieldCount(); ++i) { // This doesn't need to be set on every type, but it won't hurt // anything, and it will make the OutputBinderParameterSetter // specializations simpler parameters.at(i).length = &lengths.at(i); } int fetchStatus = OutputBinderPrivate::Friend::bindAndExecuteStatement( ¶meters, statement); while (0 == fetchStatus || MYSQL_DATA_TRUNCATED == fetchStatus) { if (MYSQL_DATA_TRUNCATED == fetchStatus) { OutputBinderPrivate::Friend::refetchTruncatedColumns( statement, ¶meters, &buffers, &lengths); } std::tuple<Args...> rowTuple; setResultTuple( &rowTuple, parameters, OutputBinderPrivate::int_<sizeof...(Args) - 1>{}); results->push_back(std::move(rowTuple)); fetchStatus = OutputBinderPrivate::Friend::fetch(statement); } OutputBinderPrivate::Friend::throwIfFetchError(fetchStatus, statement); }
// Allocates a fresh parameter object with one shader constant buffer per
// render pass. Ownership of the buffers (and the returned object) is
// transferred to the caller via ShaderMaterialParameters.
MaterialParameters* ProcessedShaderMaterial::allocMaterialParameters()
{
    ShaderMaterialParameters* smp = new ShaderMaterialParameters();
    Vector<GFXShaderConstBufferRef> buffers( __FILE__, __LINE__ );
    buffers.setSize(mPasses.size());
    for (U32 i = 0; i < mPasses.size(); i++)
        buffers[i] = _getRPD(i)->shader->allocConstBuffer();
    // smp now owns these buffers.
    smp->setBuffers(mShaderConstDesc, buffers);
    return smp;
}
// Sends the caller's scatter buffers with the pack-protocol length header
// prepended as an extra WSABUF. Fails (FALSE) if the total payload exceeds
// m_dwMaxPackSize.
BOOL CTcpPackClient::SendPackets(const WSABUF pBuffers[], int iCount)
{
    // One extra slot for the header buffer built by AddPackHeader.
    int iNewCount = iCount + 1;
    unique_ptr<WSABUF[]> buffers(new WSABUF[iNewCount]);
    DWORD header;

    if(!::AddPackHeader(pBuffers, iCount, buffers, m_dwMaxPackSize, m_usHeaderFlag, header))
        return FALSE;

    return __super::SendPackets(buffers.get(), iNewCount);
}
// Entry point: builds the scene data, initializes GL and OpenCL, allocates
// the GPU buffers, wires up GLUT callbacks, and enters the render loop
// (glutMainLoop never returns normally).
int main(int argc, char **argv)
{
    geometry();             // generate geometry data
    static_load_arrays();   // fill the static host-side arrays
    initGL(argc,argv);      // create the GL context/window
    initCL();               // set up the OpenCL context
    buffers();              // allocate/register the shared buffers
    glutDisplayFunc(run_updates);
    glutKeyboardFunc(getout);
    glutMainLoop();
    return 1;               // unreachable: glutMainLoop does not return
}
// Interposed ID3D11DeviceContext::DrawIndexed: logs the call, computes a
// per-draw signature for full-screen viewports, records localized-object
// data (and optionally full meshes on key frames), then forwards the draw
// to the real context.
void myD3D11DeviceContext::DrawIndexed(UINT IndexCount, UINT StartIndexLocation, INT BaseVertexLocation)
{
    if (g_logger->logInterfaceCalls) g_logger->logInterfaceFile << "DrawIndexed" << endl;
    if (g_logger->logDrawCalls) g_logger->logDrawFile << "DrawIndexed IndexCount=" << IndexCount << ", StartIndexLocation=" << StartIndexLocation << ", BaseVertexLocation=" << BaseVertexLocation << endl;

    DrawParameters params(IndexCount, StartIndexLocation, BaseVertexLocation);
    GPUDrawBuffers buffers(*assets);

    // Signature is only computed for full-screen viewports.
    // NOTE(review): the branch below assumes DrawParameters initializes
    // signature to 0 — confirm in its constructor.
    if (assets->viewportFullScreen()) {
        params.signature = LocalizedObject::computeSignature(*assets, params, buffers);
    }

    if (params.signature != 0) {
        g_logger->recordSignatureColorPreDraw(*assets, params);

        // Record per-object bounding info for this draw.
        LocalizedObjectData data;
        data.signature = params.signature;
        LocalizedObject::computeBoundingInfo(*assets, params, buffers, data);
        data.drawIndex = g_logger->frameRenderIndex;
        g_logger->curFrame->objectData.push_back(data);

        // On key frames, also capture the full mesh.
        if (keyFrameCaptureRate != 0 && g_logger->frameIndex % keyFrameCaptureRate == 0) {
            LocalizedObject object;
            object.load(*assets, params, buffers, true);
            g_logger->curFrame->objectMeshes.push_back(object);
        }
    }

    // Forward to the wrapped context.
    base->DrawIndexed(IndexCount, StartIndexLocation, BaseVertexLocation);

    if (params.signature != 0) {
        g_logger->recordSignatureColorPostDraw(*assets, params, buffers);
    }

    const bool reportAIRender = g_logger->capturingFrame;
    if (reportAIRender) {
        //g_state->AI->drawIndexed(VSBuffer, VSBuffer, IndexCount, StartIndexLocation, BaseVertexLocation);
    }

    if (g_logger->capturingFrame) {
        g_logger->logFrameCaptureFile << "DrawIndexed-" << g_logger->frameRenderIndex << " IndexCount=" << IndexCount << ", StartIndexLocation=" << StartIndexLocation << ", BaseVertexLocation=" << BaseVertexLocation << endl;
        g_logger->recordDrawEvent(*assets, params);
    }
}
// Sends all data blocks with one scatter/gather WSASend, then falls back
// to blocking per-block sends for anything the kernel did not accept.
// Returns the number of bytes written, or a negative/-error value.
ssize_t hbm::communication::SocketNonblocking::sendBlocks(const dataBlocks_t &blocks)
{
    // BUGFIX: the vector was previously constructed with blocks.size()
    // default-initialized elements and then push_back'ed onto, so WSASend
    // received blocks.size() empty WSABUFs before the real data. Start
    // empty and reserve instead.
    std::vector < WSABUF > buffers;
    buffers.reserve(blocks.size());
    size_t completeLength = 0;
    WSABUF newWsaBuf;
    for (dataBlocks_t::const_iterator iter = blocks.begin(); iter != blocks.end(); ++iter) {
        const dataBlock_t& item = *iter;
        newWsaBuf.buf = (CHAR*)item.pData;
        newWsaBuf.len = item.size;
        buffers.push_back(newWsaBuf);
        completeLength += item.size;
    }

    DWORD bytesWritten = 0;
    int retVal;
    retVal = WSASend(m_fd, &buffers[0], buffers.size(), &bytesWritten, 0, NULL, NULL);
    if (retVal < 0) {
        int lastError = WSAGetLastError();
        // Transient conditions fall through to the partial-send handling;
        // any other error is returned.
        // NOTE(review): this returns the positive WSA error code as in the
        // original — confirm callers distinguish it from a byte count.
        if ((lastError != WSAEWOULDBLOCK) && (lastError != WSAEINTR) && (lastError != WSAEINPROGRESS)) {
            return lastError;
        }
    }

    if (bytesWritten == completeLength) {
        // we are done!
        return bytesWritten;
    } else {
        // Walk the blocks to find where the short write stopped and push
        // the remainder of each unfinished block with blocking sends.
        size_t blockSum = 0;
        for (size_t index = 0; index < buffers.size(); ++index) {
            blockSum += buffers[index].len;
            if (bytesWritten < blockSum) {
                // this block was not send completely
                size_t bytesRemaining = blockSum - bytesWritten;
                size_t start = buffers[index].len - bytesRemaining;
                retVal = sendBlock(buffers[index].buf + start, bytesRemaining, false);
                if (retVal > 0) {
                    bytesWritten += retVal;
                } else {
                    return -1;
                }
            }
        }
    }
    return bytesWritten;
}
// Serializes an HTTP message in two phases — header first, then body —
// printing each chunk to stdout as it is produced (C++14 generic-lambda
// variant). Errors are reported to stderr.
void split_print_cxx14(message<isRequest, Body, Fields> const& m)
{
    error_code ec;
    serializer<isRequest, Body, Fields> sr{m};
    // split(true) makes next() yield the header and body separately.
    sr.split(true);
    std::cout << "Header:" << std::endl;
    do
    {
        sr.next(ec,
            [&sr](error_code& ec, auto const& buffer)
            {
                // Clear the error, print the chunk, and mark it consumed.
                ec.assign(0, ec.category());
                std::cout << buffers(buffer);
                sr.consume(boost::asio::buffer_size(buffer));
            });
    }
    while(! sr.is_header_done());
    if(! ec && ! sr.is_done())
    {
        std::cout << "Body:" << std::endl;
        do
        {
            sr.next(ec,
                [&sr](error_code& ec, auto const& buffer)
                {
                    ec.assign(0, ec.category());
                    std::cout << buffers(buffer);
                    sr.consume(boost::asio::buffer_size(buffer));
                });
        }
        while(! ec && ! sr.is_done());
    }
    if(ec)
        std::cerr << ec.message() << std::endl;
}
// Builds a 4-vertex triangle-strip quad mesh (position + color), uploads it
// to a GL buffer, wraps it in a VAO, and returns a heap-allocated Mesh that
// takes ownership of the buffer. Caller owns the returned pointer.
glmesh::Mesh* Map::buildMesh(){
    //Create vertex format
    glmesh::AttributeList attribs;
    //position
    attribs.push_back(glmesh::AttribDesc(0, 4, glmesh::VDT_SINGLE_FLOAT, glmesh::ADT_FLOAT));
    //color
    attribs.push_back(glmesh::AttribDesc(1, 4, glmesh::VDT_UNSIGN_BYTE, glmesh::ADT_NORM_FLOAT));
    //vertex format from attributes
    glmesh::VertexFormat vfmt(attribs);

    //now use CpuDataWriter to create our buffer
    // Each vertex is a position attrib followed by a color attrib.
    glmesh::CpuDataWriter writer(vfmt, 4);
    {
        writer.Attrib(0.0f, 0.0f, 0.0f, 1.0f);
        writer.Attrib<GLubyte>(12, 0, 12, 255);
        writer.Attrib(0.5f, 0.0f, 0.0f, 1.0f);
        writer.Attrib<GLubyte>(13, 128, 64, 255);
        writer.Attrib(0.0f, 0.5f, 0.0f, 1.0f);
        writer.Attrib<GLubyte>(32, 0, 45, 255);
        writer.Attrib(0.5f, 0.5f, 0.0f, 1.0f);
        writer.Attrib<GLubyte>(0, 0, 255, 255);
    }
    GLuint buffer = writer.TransferToBuffer(gl::ARRAY_BUFFER, gl::STATIC_DRAW);

    //Create a mesh variant VAO
    GLuint vao;
    gl::GenVertexArrays(1, &vao);
    gl::BindBuffer(gl::ARRAY_BUFFER, buffer);
    gl::BindVertexArray(vao);
    vfmt.BindAttributes(0);
    gl::BindBuffer(gl::ARRAY_BUFFER, 0);

    //shader variance: a single "all" variant using the VAO above
    glmesh::MeshVariantMap variants;
    variants["all"] = vao;

    glmesh::RenderCmdList cmd_list;
    cmd_list.DrawArrays(gl::TRIANGLE_STRIP, 0, 4);

    // The Mesh takes ownership of the buffer list.
    std::vector<GLuint> buffers(1, buffer);
    return new glmesh::Mesh(buffers, vao, cmd_list, variants);
}
std::vector<AudioBuffer> SoundFX::load_samples(const AudioSpec &_spec, const samples_t &_samples) { std::vector<std::future<void>> futures(_samples.size()); std::vector<AudioBuffer> buffers(_samples.size()); for(unsigned i=0; i<_samples.size(); ++i) { futures[i] = std::async(std::launch::async, [_spec,i,&buffers,&_samples]() { PINFOF(LOG_V1, LOG_AUDIO, "loading %s for %s sound fx\n", _samples[i].file, _samples[i].name); load_audio_file(_samples[i].file, buffers[i], _spec); }); } for(unsigned i=0; i<_samples.size(); ++i) { futures[i].wait(); } return buffers; }
// Strips non-serializable buffers from both the format and the parameter
// array before serialization, keeping the two containers in lockstep.
void ApexVertexBuffer::preSerialize(void*)
{
    PX_ASSERT((int32_t)mFormat.getBufferCount() == mParams->buffers.arraySizes[0]);
    ParamArray<NvParameterized::Interface*> buffers(mParams, "buffers",
            reinterpret_cast<ParamDynamicArrayStruct*>(&mParams->buffers));

    // replaceWithLast shrinks the array by swapping in the tail element, so
    // the same index must be revisited (hence the i-- after each removal).
    for (uint32_t i = 0; i < mFormat.getBufferCount(); i++)
    {
        if (!mFormat.getBufferSerialize(i))
        {
            // [i] no longer needs to be destroyed because the resize will handle it
            buffers.replaceWithLast(i);
            mFormat.bufferReplaceWithLast(i);
            i--;
        }
    }
    PX_ASSERT((int32_t)mFormat.getBufferCount() == mParams->buffers.arraySizes[0]);
}
void GLFrameBuffer::onBind() { GLGraphicDevice& gd = *checked_cast<GLGraphicDevice*>(&Context::Instance().getGraphicFactory().getGraphicDevice()); gd.bindGLFrameBuffer(mFBO); if(mFBO != 0) { check_framebuffer_status(); std::vector<GLenum> buffers(mClearViews.size()); for(size_t i=0; i<mClearViews.size(); ++i) { buffers[i] = static_cast<GLenum>(i + GL_COLOR_ATTACHMENT0_EXT); } CHECK_GL_CALL(glDrawBuffers((GLsizei)buffers.size(), &buffers[0])); } else { GLenum targets[] = { GL_BACK_LEFT }; glDrawBuffers(1, &targets[0]); } }
// Requests user-pointer capture buffers from the V4L2 device, allocates and
// queues a frame buffer for each one, then starts streaming. Exits the
// process on unrecoverable setup errors.
void start_capturing(int deviceDescriptor)
{
    struct v4l2_requestbuffers reqbuf;

    memset(&reqbuf, 0, sizeof(reqbuf));
    reqbuf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    reqbuf.memory = V4L2_MEMORY_USERPTR;
    reqbuf.count = 20;

    // NOTE(review): plain ioctl here vs xioctl for STREAMON below — confirm
    // whether EINTR retry is needed for REQBUFS as well.
    if (-1 == ioctl(deviceDescriptor, VIDIOC_REQBUFS, &reqbuf)) {
        if (errno == EINVAL)
            printf("Video capturing or userptr-streaming is not supported\n");
        else
            perror("VIDIOC_REQBUFS");
        exit(EXIT_FAILURE);
    }

    // The driver may grant fewer buffers than requested.
    if (reqbuf.count < 5) {
        printf("Not enough buffer memory\n");
        exit(EXIT_FAILURE);
    }

    // Allocate one user-space buffer per granted slot (2 bytes/pixel,
    // consistent with a 16-bpp pixel format).
    std::vector<frame_buffer> buffers(reqbuf.count);
    for (size_t i = 0; i < reqbuf.count; i++) {
        frame_buffer& mapped_buffer = buffers[i];
        mapped_buffer.length = CAMERA_FRAME_WIDTH*CAMERA_FRAME_HEIGHT*2;
        mapped_buffer.pointer = prepare_frame_buffer(mapped_buffer.length);
    }

    // Queue every buffer with the driver before starting the stream.
    // NOTE(review): the buffers vector goes out of scope here; presumably
    // prepare_frame_buffer's allocations outlive this function — confirm.
    enum v4l2_buf_type type;
    for (size_t index = 0; index < buffers.size(); index++) {
        const frame_buffer& mapped_buffer = buffers[index];
        queue_frame_buffer(deviceDescriptor,index,mapped_buffer.pointer,mapped_buffer.length);
    }

    type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    if (-1 == xioctl(deviceDescriptor, VIDIOC_STREAMON, &type))
        perror("VIDIOC_STREAMON");
}
// Frees the segment currently in ES and then scrubs any sprite-table
// entries that still reference it, so stale segment ids are never used.
void DreamGenContext::deallocatemem() {
    uint16 id = (uint16)es;
    debug(1, "deallocating segment %04x", id);
    deallocateSegment(id);

    //fixing invalid entries in the sprite table
    es = data;
    // Sprite table: 16 entries of 32 bytes each.
    uint tsize = 16 * 32;
    uint16 bseg = data.word(kBuffers);
    if (!bseg)
        return;
    SegmentRef buffers(this);
    buffers = bseg;
    uint8 *ptr = buffers.ptr(kSpritetable, tsize);
    for(uint i = 0; i < tsize; i += 32) {
        // Offset 6 of each entry holds the sprite's segment id (LE).
        uint16 seg = READ_LE_UINT16(ptr + i + 6);
        //debug(1, "sprite segment = %04x", seg);
        // Entries pointing at the freed segment are wiped to 0xff.
        if (seg == id)
            memset(ptr + i, 0xff, 32);
    }
}
// Maps each VMO read/write into a pool buffer and places every buffer on
// the free list. On any mapping failure the pool is torn down and the
// mapping error is returned; on success the pool starts with no buffer
// checked out.
zx_status_t VmoPool::Init(const zx::vmo* vmos, size_t num_vmos) {
    fbl::AllocChecker ac;
    fbl::Array<ListableBuffer> buffers(new (&ac) ListableBuffer[num_vmos], num_vmos);
    if (!ac.check()) {
        return ZX_ERR_NO_MEMORY;
    }
    buffers_ = std::move(buffers);
    free_buffers_.clear_unsafe();
    zx_status_t status;
    for (size_t i = 0; i < num_vmos; ++i) {
        free_buffers_.push_front(&buffers_[i]);
        status = buffers_[i].buffer.Map(vmos[i], 0, 0, ZX_VM_PERM_READ | ZX_VM_PERM_WRITE);
        if (status != ZX_OK) {
            // Roll back: drop the free list and release all buffers.
            free_buffers_.clear_unsafe();
            buffers_.reset();
            return status;
        }
    }
    current_buffer_ = kInvalidCurBuffer;
    return ZX_OK;
}
// Allocates `size` OpenAL buffer names and returns them wrapped in an
// owning handle whose deleter (buffers_delete) releases them.
// NOTE(review): alGenBuffers errors are not checked here — presumably the
// caller polls alGetError; confirm.
inline buffers_handle create_buffers(int size)
{
    buffers_handle buffers(new ALuint[size], buffers_delete(size));
    alGenBuffers(size, buffers.get());
    return buffers;
}
int UHD_SAFE_MAIN(int argc, char *argv[]) { // initialize to base values // toggle determines which antenna pair is being used, // where false = east/west, and true = north/south bool toggle, east, north, update = false; float p1,p2,direction; //power pairs and direction int maxSamps; double rate, freq, gain, bandwidth, num, den; int dwelltime; float power[4]= {0,0,0,0}; //corresponding to e,n,w,s size_t total_num_samps; size_t num_acc_samps; size_t num_bins; size_t num_rx_samps=0; Sqlite db("QDFDatabase"); rate = 4000000; freq = 1852500000; //sprint band bandwidth = 1250000; dwelltime = 1000; //total_num_samps= 10000; // or some function of dwell time num_bins= 512; // fft points gain = 20; //needs empirical testing uhd::device_addr_t dev_addr; uhd::rx_metadata_t md; uhd::set_thread_priority_safe(); if( db.needUpdate() ) { freq = static_cast<double>(db.getFrequency()); dwelltime = db.getDwellTime(); db.confirmUpdated(); } //create a usrp device w/ 2 antennas //std::cout << std::endl; uhd::usrp::multi_usrp::sptr usrp = uhd::usrp::multi_usrp::make(dev_addr); usrp->set_rx_subdev_spec(uhd::usrp::subdev_spec_t("A: B:"), 0); //std::cout << boost::format("Using Device: %s") % usrp->get_pp_string() << std::endl; //set the rx sample rate usrp->set_rx_rate(rate); //set the rx center frequency usrp->set_rx_freq(freq); //set the rx rf gain usrp->set_rx_gain(gain); //set the rx bandwidth usrp->set_rx_bandwidth(bandwidth,0); usrp->set_rx_bandwidth(bandwidth,1); //allow for some setup time boost::this_thread::sleep(boost::posix_time::seconds(1)); //setup streaming maxSamps = usrp->get_device()->get_max_recv_samps_per_packet(); uhd::stream_cmd_t stream_cmd(uhd::stream_cmd_t::STREAM_MODE_START_CONTINUOUS); stream_cmd.num_samps = total_num_samps; stream_cmd.stream_now = true; usrp->issue_stream_cmd(stream_cmd); //std::cout << usrp->get_pp_string(); //setup daughterboard gpio........ 
//create daughterboard interfaces boost::shared_ptr<uhd::usrp::dboard_iface> db_a = usrp->get_rx_dboard_iface(0); boost::shared_ptr<uhd::usrp::dboard_iface> db_b = usrp->get_rx_dboard_iface(1); //initialize data direction (pin 15s out) db_a->set_gpio_ddr(uhd::usrp::dboard_iface::UNIT_RX, 0x8000, 0xf000); db_b->set_gpio_ddr(uhd::usrp::dboard_iface::UNIT_RX, 0x8000, 0xf000); //initialize gpio pins to 0b 1000 0000 0000 0000 (pin 15s high) db_a->set_gpio_out(uhd::usrp::dboard_iface::UNIT_RX, 0x8000, 0xf000); db_b->set_gpio_out(uhd::usrp::dboard_iface::UNIT_RX, 0x8000, 0xf000); //wait long enough to measure boost::this_thread::sleep(boost::posix_time::seconds(1)); /*// gpio verification tests********************************** std::cout << boost::format("gpio0: %f") % db_a->get_gpio_out (uhd::usrp::dboard_iface::UNIT_RX)<< std::endl; std::cout << boost::format("gpio0: %f") % db_b->get_gpio_out(uhd::usrp::dboard_iface::UNIT_RX)<< std::endl; std::cout << boost::format("read ddra: %f") % db_a->get_gpio_ddr (uhd::usrp::dboard_iface::UNIT_RX)<< std::endl; std::cout << boost::format("read ddrb: %f") % db_b->get_gpio_ddr (uhd::usrp::dboard_iface::UNIT_RX)<< std::endl; //set gpio pins to 0b 0000 0000 0000 0000 (pin 15 low) db_a->set_gpio_out(uhd::usrp::dboard_iface::UNIT_RX, 0x0000, 0x8000); db_b->set_gpio_out(uhd::usrp::dboard_iface::UNIT_RX, 0x0000, 0x8000); //wait long enough to verify boost::this_thread::sleep(boost::posix_time::seconds(1)); //display for verification std::cout << boost::format("gpio0: %f") % db_a->get_gpio_out (uhd::usrp::dboard_iface::UNIT_RX)<< std::endl; std::cout << boost::format("gpio0: %f") % db_b->get_gpio_out(uhd::usrp::dboard_iface::UNIT_RX)<< std::endl; std::cout << boost::format("read ddra: %f") % db_a->get_gpio_ddr (uhd::usrp::dboard_iface::UNIT_RX)<< std::endl; std::cout << boost::format("read ddrb: %f") % db_b->get_gpio_ddr (uhd::usrp::dboard_iface::UNIT_RX)<< std::endl; //set gpio pins to 0b 1000 0000 0000 0000 (pin 15 high) 
db_a->set_gpio_out(uhd::usrp::dboard_iface::UNIT_RX, 0x8000, 0x8000); db_b->set_gpio_out(uhd::usrp::dboard_iface::UNIT_RX, 0x8000, 0x8000); boost::this_thread::sleep(boost::posix_time::seconds(1)); *///end gpio verification tests******************************* //create data buffers and initialize them std::complex<float> buff[num_bins]; std::complex<float> buff1[num_bins]; std::vector<std::complex<float>* > buffers(2); buffers[0] = &buff[0]; buffers[1] = &buff1[0]; for(int i = 0; i < num_bins; i++) { buff[i] = 0; buff1[i] = 0; } //------------------------------------------------------------------ //-- Main loop //------------------------------------------------------------------ while (true) { //read a buffer's worth of samples every iteration num_rx_samps = usrp->get_device()->recv(buffers, num_bins, md, uhd::io_type_t::COMPLEX_FLOAT32, uhd::device::RECV_MODE_FULL_BUFF); if (num_rx_samps != num_bins) continue; //calculate the dfts aoa::log_pwr_dft_type lpdft1(aoa::log_pwr_dft(&buff[0], num_rx_samps)); aoa::log_pwr_dft_type lpdft2(aoa::log_pwr_dft(&buff1[0], num_rx_samps)); /*//dft data verification //verifying dft for(int i = 0; i < num_bins; i++) { std::cout << lpdft1[i]<< std::endl; } //verify iq pairs for(int i = 0; i < num_bins; i++) { std::cout << buff1[i]<< std::endl; } *///end dft data verification p1 = 0; p2 = 0; for(int i = 0; i < num_bins; i++) { p1 += lpdft1[i]; p2 += lpdft2[i]; } p1=p1/num_bins; p2=p2/num_bins; // determine which oppsosing antenna is recieving the most power if(toggle) { p1>p2 ? 
east=true : east=false; power[0]=p1; power[2]=p2; //set db_a gpio (pin 15 high) //set db_b gpio (pin 15 low) db_a->set_gpio_out(uhd::usrp::dboard_iface::UNIT_RX, 0x8000, 0x8000); db_b->set_gpio_out(uhd::usrp::dboard_iface::UNIT_RX, 0x0000, 0x8000); } else { p1>p2?north=true:north=false; power[1]=p1; power[3]=p2; //set db_a gpio (pin 15 low) //set db_b gpio (pin 15 high) db_a->set_gpio_out(uhd::usrp::dboard_iface::UNIT_RX, 0x0000, 0x8000); db_b->set_gpio_out(uhd::usrp::dboard_iface::UNIT_RX, 0x8000, 0x8000); }//end opposing power if/else //do we have data from both sets of antennas? if(toggle) { //then update direction by determining which 2 antennas //had the highest recieved power, determining which was //more powerful, taking the arctan, and adding an //appropriate quadrant offset if(east && north) { //quadrant I power[0]>power[1] ? num=power[1] : num=power[0]; power[0]>power[1] ? den=power[0] : den=power[1]; direction=(atan(num/den)*180/pi); } else if(east && !north) { //quadrant IV power[0]>power[3] ? num=power[0] : num=power[3]; power[0]>power[3] ? den=power[3] : den=power[0]; direction=(atan(num/den)*180/pi) + 270; } else if(!east && north) { //quadrant II power[2]>power[1] ? num=power[2] : num=power[1]; power[2]>power[1] ? den=power[1] : den=power[2]; direction=(atan(num/den)*180/pi) + 90; } else { //quadrant III power[2]>power[3] ? num=power[2] : num=power[3]; power[2]>power[3] ? den=power[3] : den=power[2]; direction=(atan(num/den)*180/pi) + 180; }// end if/else direction setting std::cout << "direction = "<<direction<< std::endl; //do we have user updates? (we do this in here so we have a //full data set from both antenna pairs before changing parameters if (update) { /* //set the rx center frequency usrp->set_rx_freq(freq); total_num_samps=some function of dwell time and sampling rate */ }//end if update }//end toggle check if toggle=!toggle; //update which antennas are being used }//end main loop.....!......!!.....!.!!.!!! return 0; }//end aoa.cpp
// Client mode is implemented by this function: resolves host/port, creates
// a TCP or UDP socket, optionally binds a local port, sets socket options,
// connects to the server, and returns the connected descriptor.
int cliopen(char *host, char *port)
{
	int fd, i, on;
	char *protocol;
	unsigned long inaddr;
	struct sockaddr_in cli_addr, serv_addr;
	struct servent *sp;
	struct hostent *hp;

	protocol = udp ? "udp" : "tcp";

	/* initialize socket address structure */
	bzero((char *) &serv_addr, sizeof(serv_addr));
	serv_addr.sin_family = AF_INET;

	/* see if "port" is a service name or number */
	if ( (i = atoi(port)) == 0) {
		if ( (sp = getservbyname(port, protocol)) == NULL)
			err_quit("getservbyname() error for: %s/%s", port, protocol);
		serv_addr.sin_port = sp->s_port;
	} else
		serv_addr.sin_port = htons(i); // convert host byte order to network byte order

	/*
	 * First try to convert the host name as a dotted-decimal number.
	 * Only if that fails do we call gethostbyname().
	 */
	// convert the host string to an IP address;
	// if the conversion fails, fall back to gethostbyname()
	if ( (inaddr = inet_addr(host)) != INADDR_NONE) {
		/* it's dotted-decimal */
		bcopy((char *) &inaddr, (char *) &serv_addr.sin_addr, sizeof(inaddr));
	} else {
		if ( (hp = gethostbyname(host)) == NULL)
			err_quit("gethostbyname() error for: %s", host);
		bcopy(hp->h_addr, (char *) &serv_addr.sin_addr, hp->h_length);
	}

	// create the socket; choose UDP or TCP according to the flag
	if ( (fd = socket(AF_INET, udp ? SOCK_DGRAM : SOCK_STREAM, 0)) < 0)
		err_sys("socket() error");

	// reuse the local address/port:
	// SO_REUSEADDR is useful when the program stops and wants to restart
	// immediately while the old socket's port is still in TIME_WAIT.
	if (reuseaddr) {
		on = 1;
		if (setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, (char *) &on, sizeof (on)) < 0)
			err_sys("setsockopt of SO_REUSEADDR error");
	}

	/*
	 * User can specify port number for client to bind. Only real use
	 * is to see a TCP connection initiated by both ends at the same time.
	 * Also, if UDP is being used, we specifically call bind() to assign
	 * an ephemeral port to the socket.
	 */
	if (bindport != 0 || udp) {
		bzero((char *) &cli_addr, sizeof(cli_addr));
		cli_addr.sin_family = AF_INET;
		cli_addr.sin_addr.s_addr = htonl(INADDR_ANY); /* wildcard */
		cli_addr.sin_port = htons(bindport);
		if (bind(fd, (struct sockaddr *) &cli_addr, sizeof(cli_addr)) < 0)
			err_sys("bind() error");
	}

	/* Need to allocate buffers before connect(), since they can affect
	 * TCP options (window scale, etc.).
	 */
	buffers(fd); // allocate read/write buffers and the socket's window sizes

	// set socket options (getsockopt & setsockopt)
	sockopts(fd, 0); /* may also want to set SO_DEBUG */

	/*
	 * Connect to the server. Required for TCP, optional for UDP.
	 */
	if (connect(fd, (struct sockaddr *) &serv_addr, sizeof(serv_addr)) < 0)
		err_sys("connect() error");

	// verbose output?
	if (verbose) {
		/* Call getsockname() to find local address bound to socket:
		   TCP ephemeral port was assigned by connect() or bind();
		   UDP ephemeral port was assigned by bind(). */
		i = sizeof(cli_addr);
		if (getsockname(fd, (struct sockaddr *) &cli_addr, &i) < 0)
			err_sys("getsockname() error");

		/* Can't do one fprintf() since inet_ntoa() stores
		   the result in a static location. */
		fprintf(stderr, "connected on %s.%d ",
			INET_NTOA(cli_addr.sin_addr), ntohs(cli_addr.sin_port));
		fprintf(stderr, "to %s.%d\n",
			INET_NTOA(serv_addr.sin_addr), ntohs(serv_addr.sin_port));
	}

	sockopts(fd, 1); /* some options get set after connect() */

	return(fd);
}
// (Re)initializes the video output path for a stream: sets up the scaler,
// rounds sizes to even dimensions for RGB-565 frames, allocates a shared
// frame-buffer heap, registers it with SurfaceFlinger, and (optionally)
// creates the software color converter. Returns true on success.
bool AndroidVideoOutput::InitCheck (AVStream *vstream)
{
    // release resources if previously initialized
    CloseFrameBuffer();

#if 0
    // initialize only when we have all the required parameters
    if (iVideoDisplayWidth <=0 || iVideoDisplayHeight <= 0 ||
            iVideoWidth <= 0 || iVideoHeight <= 0) {
        ERROR ("display or frame size error! display(%d, %d), frame(%d, %d)",
                iVideoDisplayWidth, iVideoDisplayHeight, iVideoWidth, iVideoHeight);
        return false;
    }
#endif

    if (InitScaler (vstream) < 0) {
        ERROR ("InitScaler fail!");
        return false;
    }

    // copy parameters in case we need to adjust them
    int displayWidth = iVideoDisplayWidth;
    int displayHeight = iVideoDisplayHeight;
    int frameWidth = iVideoWidth;
    int frameHeight = iVideoHeight;
    int frameSize;

    // RGB-565 frames are 2 bytes/pixel
    // Round each dimension down to the nearest even value.
    displayWidth = (displayWidth + 1) & -2;
    displayHeight = (displayHeight + 1) & -2;
    frameWidth = (frameWidth + 1) & -2;
    frameHeight = (frameHeight + 1) & -2;
    frameSize = frameWidth * frameHeight * 2;
    iVideoFrameSize = frameSize;

    // create frame buffer heap and register with surfaceflinger
    mFrameHeap = new android::MemoryHeapBase(frameSize * kBufferCount);
    if (mFrameHeap->heapID() < 0) {
        ERROR ("Error creating frame buffer heap!");
        return false;
    }

    DEBUG ("allcate buffers for surface(%p)!(%d, %d, %d, %d)",
            mSurface.get(), displayWidth, displayHeight, displayWidth, displayHeight);
    android::ISurface::BufferHeap buffers(displayWidth, displayHeight,
            displayWidth, displayHeight, android::PIXEL_FORMAT_RGB_565, mFrameHeap);
    DEBUG ("mSurface(%p)->registerBuffers", mSurface.get());
    mSurface->registerBuffers(buffers);

    // create frame buffers: each entry is a byte offset into the heap
    for (int i = 0; i < kBufferCount; i++) {
        mFrameBuffers[i] = i * frameSize;
    }

#ifdef USE_COLOR_CONVERTER
    // initialize software color converter
    iColorConverter = ColorConvert16::NewL();
    iColorConverter->Init(displayWidth, displayHeight, frameWidth,
            displayWidth, displayHeight, displayWidth, CCROTATE_NONE);
    iColorConverter->SetMemHeight(frameHeight);
    iColorConverter->SetMode(1);
#endif

    DEBUG ("video = %d x %d", displayWidth, displayHeight);
    DEBUG ("frame = %d x %d", frameWidth, frameHeight);
    DEBUG ("frame #bytes = %d", frameSize);

    // register frame buffers with SurfaceFlinger
    mFrameBufferIndex = 0;
    mInitialized = true;
    //mPvPlayer->sendEvent(MEDIA_SET_VIDEO_SIZE, iVideoDisplayWidth, iVideoDisplayHeight);
    return mInitialized;
}
// Handles one serializer chunk: clears the error code, prints the buffer
// contents to stdout, and tells the serializer how many bytes were
// consumed so it can advance.
void operator()(error_code& ec, ConstBufferSequence const& buffer) const
{
    ec.assign(0, ec.category());
    std::cout << buffers(buffer);
    sr.consume(boost::asio::buffer_size(buffer));
}
int servopen(char* host, char* port) { int fd, newfd, i, on, pid; const char* protocol; struct in_addr inaddr; struct servent* sp; protocol = udp ? "udp" : "tcp"; /* Initialize the socket address structure */ bzero(&servaddr, sizeof(servaddr)); servaddr.sin_family = AF_INET; /* Caller normally wildcards the local Internet address, meaning a connection will be accepted on any connected interface. We only allow an IP address for the "host", not a name. */ if (host == NULL) servaddr.sin_addr.s_addr = htonl(INADDR_ANY); /* wildcard */ else { if (inet_aton(host, &inaddr) == 0) err_quit("invalid host name for server: %s", host); servaddr.sin_addr = inaddr; } /* See if "port" is a service name or number */ if ((i = atoi(port)) == 0) { if ((sp = getservbyname(port, protocol)) == NULL) err_ret("getservbyname() error for: %s/%s", port, protocol); servaddr.sin_port = sp->s_port; } else servaddr.sin_port = htons(i); if ((fd = socket(AF_INET, udp ? SOCK_DGRAM : SOCK_STREAM, 0)) < 0) err_sys("socket() error"); if (reuseaddr) { on = 1; if (setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &on, sizeof(on)) < 0) err_sys("setsockopt of SO_REUSEADDR error"); } #ifdef SO_REUSEPORT if (reuseport) { on = 1; if (setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &on, sizeof(on)) < 0) err_sys("setsockopt of SO_REUSEPORT error"); } #endif /* Bind our well-known port so the client can connect to us. 
*/ if (bind(fd, (struct sockaddr*)&servaddr, sizeof(servaddr)) < 0) err_sys("can't bind local address"); join_mcast(fd, &servaddr); if (udp) { buffers(fd); if (foreignip[0] != 0) { /* connect to foreignip/port# */ bzero(&cliaddr, sizeof(cliaddr)); if (inet_aton(foreignip, &cliaddr.sin_addr) == 0) err_quit("invalid IP address: %s", foreignip); cliaddr.sin_family = AF_INET; cliaddr.sin_port = htons(foreignport); /* connect() for datagram socket doesn't appear to allow wildcarding of either IP address or port number */ if (connect(fd, (struct sockaddr*)&cliaddr, sizeof(cliaddr)) < 0) err_sys("connect() error"); } sockopts(fd, 1); return (fd); /* nothing else to do */ } buffers(fd); /* may set receive buffer size; must do here to get correct window advertised on SYN */ sockopts(fd, 0); /* only set some socket options for fd */ listen(fd, listenq); if (pauselisten) sleep_us(pauselisten * 1000); /* lets connection queue build up */ if (dofork) TELL_WAIT(); /* initialize synchronization primitives */ for (;;) { i = sizeof(cliaddr); if ((newfd = accept(fd, (struct sockaddr*)&cliaddr, &i)) < 0) err_sys("accept() error"); if (dofork) { if ((pid = fork()) < 0) err_sys("fork error"); if (pid > 0) { close(newfd); /* parent closes connected socket */ WAIT_CHILD(); /* wait for child to output to terminal */ continue; /* and back to for(;;) for another accept() */ } else { close(fd); /* child closes listening socket */ } } /* child (or iterative server) continues here */ if (verbose) { /* Call getsockname() to find local address bound to socket: local internet address is now determined (if multihomed). */ i = sizeof(servaddr); if (getsockname(newfd, (struct sockaddr*)&servaddr, &i) < 0) err_sys("getsockname() error"); /* Can't do one fprintf() since inet_ntoa() stores the result in a static location. 
*/ fprintf(stderr, "connection on %s.%d ", INET_NTOA(servaddr.sin_addr), ntohs(servaddr.sin_port)); fprintf(stderr, "from %s.%d\n", INET_NTOA(cliaddr.sin_addr), ntohs(cliaddr.sin_port)); } buffers(newfd); /* setsockopt() again, in case it didn't propagate from listening socket to connected socket */ sockopts(newfd, 1); /* can set all socket options for this socket */ if (dofork) TELL_PARENT(getppid()); /* tell parent we're done with terminal */ return (newfd); } }
/*
 * cliopen: create the client socket and (for TCP, or UDP with connectudp)
 * connect it to host/port.
 *
 * host - server hostname or dotted-decimal IP address.
 * port - service name or numeric port string.
 *
 * Returns the connected (or, for unconnected UDP, bound) descriptor.
 * Fatal errors terminate via err_quit()/err_sys().
 */
int cliopen(char *host, char *port)
{
	int fd, i, on;
	const char *protocol;
	struct in_addr inaddr;
	struct servent *sp;
	struct hostent *hp;

	protocol = udp ? "udp" : "tcp";

	/* initialize socket address structure */
	bzero(&servaddr, sizeof(servaddr));
	servaddr.sin_family = AF_INET;

	/* see if "port" is a service name or number */
	if ( (i = atoi(port)) == 0) {
		if ( (sp = getservbyname(port, protocol)) == NULL)
			err_quit("getservbyname() error for: %s/%s", port, protocol);
		servaddr.sin_port = sp->s_port;	/* already in network byte order */
	} else
		servaddr.sin_port = htons(i);

	/*
	 * First try to convert the host name as a dotted-decimal number.
	 * Only if that fails do we call gethostbyname().
	 */
	if (inet_aton(host, &inaddr) == 1)
		servaddr.sin_addr = inaddr;	/* it's dotted-decimal */
	else if ( (hp = gethostbyname(host)) != NULL)
		memcpy(&servaddr.sin_addr, hp->h_addr, hp->h_length);
	else
		err_quit("invalid hostname: %s", host);

	if ( (fd = socket(AF_INET, udp ? SOCK_DGRAM : SOCK_STREAM, 0)) < 0)
		err_sys("socket() error");

	/* SO_REUSEADDR/SO_REUSEPORT must be set before bind() to take effect */
	if (reuseaddr) {
		on = 1;
		if (setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &on, sizeof (on)) < 0)
			err_sys("setsockopt of SO_REUSEADDR error");
	}
#ifdef	SO_REUSEPORT
	if (reuseport) {
		on = 1;
		if (setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &on, sizeof (on)) < 0)
			err_sys("setsockopt of SO_REUSEPORT error");
	}
#endif

	/*
	 * User can specify port number for client to bind. Only real use
	 * is to see a TCP connection initiated by both ends at the same time.
	 * Also, if UDP is being used, we specifically call bind() to assign
	 * an ephemeral port to the socket.
	 * Also, for experimentation, client can also set local IP address
	 * (and port) using -l option. Allow localip[] to be set but bindport
	 * to be 0.
	 */
	if (bindport != 0 || localip[0] != 0 || udp) {
		bzero(&cliaddr, sizeof(cliaddr));
		cliaddr.sin_family = AF_INET;
		cliaddr.sin_port = htons(bindport);	/* can be 0 */
		if (localip[0] != 0) {
			if (inet_aton(localip, &cliaddr.sin_addr) == 0)
				err_quit("invalid IP address: %s", localip);
		} else
			cliaddr.sin_addr.s_addr = htonl(INADDR_ANY);	/* wildcard */
		if (bind(fd, (struct sockaddr *) &cliaddr, sizeof(cliaddr)) < 0)
			err_sys("bind() error");
	}

	/* Need to allocate buffers before connect(), since they can affect
	 * TCP options (window scale, etc.).
	 */
	buffers(fd);
	sockopts(fd, 0);	/* may also want to set SO_DEBUG */

	/*
	 * Connect to the server. Required for TCP, optional for UDP.
	 */
	if (udp == 0 || connectudp) {
		for ( ; ; ) {
			if (connect(fd, (struct sockaddr *) &servaddr, sizeof(servaddr)) == 0)
				break;		/* all OK */
			if (errno == EINTR)	/* can happen with SIGIO */
				continue;
			if (errno == EISCONN)	/* can happen with SIGIO */
				break;
			err_sys("connect() error");
		}
	}

	if (verbose) {
		/* Call getsockname() to find local address bound to socket:
		   TCP ephemeral port was assigned by connect() or bind();
		   UDP ephemeral port was assigned by bind(). */
		i = sizeof(cliaddr);
		if (getsockname(fd, (struct sockaddr *) &cliaddr, &i) < 0)
			err_sys("getsockname() error");

		/* Can't do one fprintf() since inet_ntoa() stores the result
		   in a static location. */
		fprintf(stderr, "connected on %s.%d ", INET_NTOA(cliaddr.sin_addr), ntohs(cliaddr.sin_port));
		fprintf(stderr, "to %s.%d\n", INET_NTOA(servaddr.sin_addr), ntohs(servaddr.sin_port));
	}

	sockopts(fd, 1);	/* some options get set after connect() */

	return(fd);
}
int main( int argc, char** argv ) { ::setenv( "PYLON_ROOT", STRINGIZED( BASLER_PYLON_DIR ), 0 ); ::setenv( "GENICAM_ROOT_V2_1", STRINGIZED( BASLER_PYLON_GENICAM_DIR ), 0 ); try { unsigned int id; std::string address; std::string setAttributes; unsigned int discard; boost::program_options::options_description description( "options" ); unsigned int packet_size; unsigned int exposure; unsigned int gain; unsigned int offset_x; unsigned int offset_y; unsigned int width; unsigned int height; std::string frame_trigger; std::string line_trigger; unsigned int line_rate; double timeout_seconds; description.add_options() ( "help,h", "display help message" ) ( "address", boost::program_options::value< std::string >( &address ), "camera ip address; default: connect to the first available camera" ) ( "discard", "discard frames, if cannot keep up; same as --buffer=1" ) ( "buffer", boost::program_options::value< unsigned int >( &discard )->default_value( 0 ), "maximum buffer size before discarding frames, default: unlimited" ) ( "list-cameras", "output camera list and exit" ) ( "fields,f", boost::program_options::value< std::string >( &options.fields )->default_value( "t,rows,cols,type" ), "header fields, possible values: t,rows,cols,type,size,counters" ) ( "image-type", boost::program_options::value< std::string >( &options.type )->default_value( "3ub" ), "image type, e.g. 
'3ub'; also see --long-help for details" ) ( "offset-x", boost::program_options::value< unsigned int >( &offset_x )->default_value( 0 ), "offset in pixels in the line" ) ( "offset-y", boost::program_options::value< unsigned int >( &offset_y )->default_value( 0 ), "offset in lines in the frame" ) ( "width", boost::program_options::value< unsigned int >( &width )->default_value( std::numeric_limits< unsigned int >::max() ), "line width in pixels; default: max" ) ( "height", boost::program_options::value< unsigned int >( &height )->default_value( std::numeric_limits< unsigned int >::max() ), "number of lines in frame (in chunk mode always 1); default: max" ) ( "frame-trigger", boost::program_options::value< std::string >( &frame_trigger ), "'line1', 'line2', 'line3', 'encoder'" ) //; if absent while --line-trigger present, same as --line-trigger" ) ( "line-trigger", boost::program_options::value< std::string >( &line_trigger ), "'line1', 'line2', 'line3', 'encoder'" ) ( "line-rate", boost::program_options::value< unsigned int >( &line_rate ), "line aquisition rate" ) ( "encoder-ticks", boost::program_options::value< unsigned int >( &encoder_ticks ), "number of encoder ticks until the counter resets (reused for line number in frame in chunk mode)" ) ( "header-only", "output header only" ) ( "no-header", "output image data only" ) ( "packet-size", boost::program_options::value< unsigned int >( &packet_size ), "mtu size on camera side, should be not greater than your lan and network interface set to" ) ( "exposure", boost::program_options::value< unsigned int >( &exposure )->default_value( 100 ), "exposure" ) ( "gain", boost::program_options::value< unsigned int >( &gain )->default_value( 100 ), "gain" ) ( "timeout", boost::program_options::value< double >( &timeout_seconds )->default_value( 3.0 ), " frame acquisition timeout" ) ( "test-colour", "output colour test image" ) ( "verbose,v", "be more verbose" ); boost::program_options::variables_map vm; 
boost::program_options::store( boost::program_options::parse_command_line( argc, argv, description), vm ); boost::program_options::parsed_options parsed = boost::program_options::command_line_parser(argc, argv).options( description ).allow_unregistered().run(); boost::program_options::notify( vm ); if ( vm.count( "help" ) || vm.count( "long-help" ) ) { std::cerr << "acquire images from a basler camera (for now gige only)" << std::endl; std::cerr << "output to stdout as serialized cv::Mat" << std::endl; std::cerr << std::endl; std::cerr << "usage: basler-cat [<options>] [<filters>]" << std::endl; std::cerr << std::endl; std::cerr << description << std::endl; if( vm.count( "long-help" ) ) { std::cerr << std::endl; std::cerr << snark::cv_mat::filters::usage() << std::endl; std::cerr << std::endl; std::cerr << snark::cv_mat::serialization::options::type_usage() << std::endl; } std::cerr << std::endl; std::cerr << "note: there is a glitch or a subtle feature in basler line camera:" << std::endl; std::cerr << " - power-cycle camera" << std::endl; std::cerr << " - view colour images: it works" << std::endl; std::cerr << " - view grey-scale images: it works" << std::endl; std::cerr << " - view colour images: it still displays grey-scale" << std::endl; std::cerr << " even in their native viewer you need to set colour image" << std::endl; std::cerr << " repeatedly and with pure luck it works, but we have not" << std::endl; std::cerr << " managed to do it in software; the remedy: power-cycle the camera" << std::endl; std::cerr << std::endl; return 1; } verbose = vm.count( "verbose" ); if( verbose ) { std::cerr << "basler-cat: PYLON_ROOT=" << ::getenv( "PYLON_ROOT" ) << std::endl; std::cerr << "basler-cat: GENICAM_ROOT_V2_1=" << ::getenv( "GENICAM_ROOT_V2_1" ) << std::endl; std::cerr << "basler-cat: initializing camera..." 
<< std::endl; } Pylon::PylonAutoInitTerm auto_init_term; Pylon::CTlFactory& factory = Pylon::CTlFactory::GetInstance(); Pylon::ITransportLayer* transport_layer( Pylon::CTlFactory::GetInstance().CreateTl( Pylon::CBaslerGigECamera::DeviceClass() ) ); if( !transport_layer ) { std::cerr << "basler-cat: failed to create transport layer" << std::endl; std::cerr << " most likely PYLON_ROOT and GENICAM_ROOT_V2_1 environment variables not set" << std::endl; std::cerr << " point them to your pylon installation, e.g:" << std::endl; std::cerr << " export PYLON_ROOT=/opt/pylon" << std::endl; std::cerr << " export GENICAM_ROOT_V2_1=/opt/pylon/genicam" << std::endl; return 1; } if( vm.count( "list-cameras" ) ) { Pylon::DeviceInfoList_t devices; factory.EnumerateDevices( devices ); for( unsigned int i = 0; i < devices.size(); ++i ) { std::cerr << devices[i].GetFullName() << std::endl; } return 0; } timeout = timeout_seconds * 1000.0; std::string filters = comma::join( boost::program_options::collect_unrecognized( parsed.options, boost::program_options::include_positional ), ';' ); options.header_only = vm.count( "header-only" ); options.no_header = vm.count( "no-header" ); csv = comma::csv::options( argc, argv ); bool chunk_mode = csv.has_field( "counters" ) // quick and dirty || csv.has_field( "adjusted-t" ) || csv.has_field( "line" ) || csv.has_field( "line-count" ) || csv.has_field( "ticks" ) || csv.has_field( "counters/adjusted-t" ) || csv.has_field( "counters/line" ) || csv.has_field( "counters/line-count" ) || csv.has_field( "counters/ticks" ); if( chunk_mode ) { if( vm.count( "encoder-ticks" ) == 0 ) { std::cerr << "basler-cat: chunk mode, please specify --encoder-ticks" << std::endl; return 1; } if( !filters.empty() ) { std::cerr << "basler-cat: chunk mode, cannot handle filters; use: basler-cat | cv-cat <filters> instead" << std::endl; return 1; } if( height != 1 && height != std::numeric_limits< unsigned int >::max() ) { std::cerr << "basler-cat: only --height=1 
implemented in chunk mode" << std::endl; return 1; } height = 1; std::vector< std::string > v = comma::split( csv.fields, ',' ); std::string format; for( unsigned int i = 0; i < v.size(); ++i ) { if( v[i] == "t" ) { v[i] = "header/" + v[i]; format += "t"; } else if( v[i] == "rows" || v[i] == "cols" || v[i] == "size" || v[i] == "type" ) { v[i] = "header/" + v[i]; format += "ui"; } else if( v[i] == "adjusted-t" ) { v[i] = "counters/" + v[i]; format += "t"; } else if( v[i] == "line-count" || v[i] == "ticks" ) { v[i] = "counters/" + v[i]; format += "ul"; } else if( v[i] == "line" ) { v[i] = "counters/" + v[i]; format += "ui"; } else { std::cerr << "basler-cat: expected field, got '" << v[i] << "'" << std::endl; return 1; } } csv.fields = comma::join( v, ',' ); csv.full_xpath = true; csv.format( format ); } if( !vm.count( "buffer" ) && vm.count( "discard" ) ) { discard = 1; } Pylon::CBaslerGigECamera camera; if( vm.count( "address" ) ) { Pylon::CBaslerGigEDeviceInfo info; info.SetIpAddress( address.c_str() ); camera.Attach( factory.CreateDevice( info ) ); } else { Pylon::DeviceInfoList_t devices; factory.EnumerateDevices( devices ); if( devices.empty() ) { std::cerr << "basler-cat: no camera found" << std::endl; return 1; } std::cerr << "basler-cat: will connect to the first of " << devices.size() << " found device(s):" << std::endl; for( unsigned int i = 0; i < devices.size(); ++i ) { std::cerr << " " << devices[i].GetFullName() << std::endl; } camera.Attach( transport_layer->CreateDevice( devices[0] ) ); } if( verbose ) { std::cerr << "basler-cat: initialized camera" << std::endl; } if( verbose ) { std::cerr << "basler-cat: opening camera " << camera.GetDevice()->GetDeviceInfo().GetFullName() << "..." 
<< std::endl; } camera.Open(); if( verbose ) { std::cerr << "basler-cat: opened camera " << camera.GetDevice()->GetDeviceInfo().GetFullName() << std::endl; } Pylon::CBaslerGigECamera::StreamGrabber_t grabber( camera.GetStreamGrabber( 0 ) ); grabber.Open(); unsigned int channels; switch( options.get_header().type ) // quick and dirty { case CV_8UC1: channels = set_pixel_format_( camera, Basler_GigECamera::PixelFormat_Mono8 ); break; case CV_8UC3: channels = set_pixel_format_( camera, Basler_GigECamera::PixelFormat_RGB8Packed ); break; default: std::cerr << "basler-cat: type \"" << options.type << "\" not implemented or not supported by camera" << std::endl; return 1; } unsigned int max_width = camera.Width.GetMax(); if( offset_x >= max_width ) { std::cerr << "basler-cat: expected --offset-x less than " << max_width << ", got " << offset_x << std::endl; return 1; } camera.OffsetX.SetValue( offset_x ); width = ( ( unsigned long long )( offset_x ) + width ) < max_width ? width : max_width - offset_x; camera.Width.SetValue( width ); unsigned int max_height = camera.Height.GetMax(); //if( height < 512 ) { std::cerr << "basler-cat: expected height greater than 512, got " << height << std::endl; return 1; } // todo: is the colour line 2098 * 3 or ( 2098 / 3 ) * 3 ? //offset_y *= channels; //height *= channels; if( offset_y >= max_height ) { std::cerr << "basler-cat: expected --offset-y less than " << max_height << ", got " << offset_y << std::endl; return 1; } camera.OffsetY.SetValue( offset_y ); height = ( ( unsigned long long )( offset_y ) + height ) < max_height ? height : ( max_height - offset_y ); camera.Height.SetValue( height ); if( verbose ) { std::cerr << "basler-cat: set width,height to " << width << "," << height << std::endl; } if( vm.count( "packet-size" ) ) { camera.GevSCPSPacketSize.SetValue( packet_size ); } // todo: giving up... 
the commented code throws, but failure to stop acquisition, if active // seems to lead to the following scenario: // - power-cycle camera // - view colour images: it works // - view grey-scale images: it works // - view colour images: it still displays grey-scale //if( verbose ) { std::cerr << "basler-cat: getting aquisition status... (frigging voodoo...)" << std::endl; } //GenApi::IEnumEntry* acquisition_status = camera.AcquisitionStatusSelector.GetEntry( Basler_GigECamera::AcquisitionStatusSelector_AcquisitionActive ); //if( acquisition_status && GenApi::IsAvailable( acquisition_status ) && camera.AcquisitionStatus() ) //{ // if( verbose ) { std::cerr << "basler-cat: stopping aquisition..." << std::endl; } // camera.AcquisitionStop.Execute(); // if( verbose ) { std::cerr << "basler-cat: aquisition stopped" << std::endl; } //} // todo: a hack for now GenApi::IEnumEntry* acquisitionStart = camera.TriggerSelector.GetEntry( Basler_GigECamera::TriggerSelector_AcquisitionStart ); if( acquisitionStart && GenApi::IsAvailable( acquisitionStart ) ) { camera.TriggerSelector.SetValue( Basler_GigECamera::TriggerSelector_AcquisitionStart ); camera.TriggerMode.SetValue( frame_trigger.empty() ? 
Basler_GigECamera::TriggerMode_Off : Basler_GigECamera::TriggerMode_On ); } GenApi::IEnumEntry* frameStart = camera.TriggerSelector.GetEntry( Basler_GigECamera::TriggerSelector_FrameStart ); if( frameStart && GenApi::IsAvailable( frameStart ) ) { //if( frame_trigger.empty() ) { frame_trigger = line_trigger; } if( frame_trigger.empty() ) { camera.TriggerSelector.SetValue( Basler_GigECamera::TriggerSelector_FrameStart ); camera.TriggerMode.SetValue( Basler_GigECamera::TriggerMode_Off ); } else { camera.TriggerSelector.SetValue( Basler_GigECamera::TriggerSelector_FrameStart ); camera.TriggerMode.SetValue( Basler_GigECamera::TriggerMode_On ); Basler_GigECamera::TriggerSourceEnums t; if( frame_trigger == "line1" ) { camera.TriggerSource.SetValue( Basler_GigECamera::TriggerSource_Line1 ); } if( frame_trigger == "line2" ) { camera.TriggerSource.SetValue( Basler_GigECamera::TriggerSource_Line2 ); } if( frame_trigger == "line3" ) { camera.TriggerSource.SetValue( Basler_GigECamera::TriggerSource_Line3 ); } else if( frame_trigger == "encoder" ) { camera.TriggerSource.SetValue( Basler_GigECamera::TriggerSource_ShaftEncoderModuleOut ); } else { std::cerr << "basler-cat: frame trigger '" << frame_trigger << "' not implemented or invalid" << std::endl; return 1; } camera.TriggerActivation.SetValue( Basler_GigECamera::TriggerActivation_RisingEdge ); camera.TriggerSelector.SetValue( Basler_GigECamera::TriggerSelector_LineStart ); camera.TriggerMode.SetValue( Basler_GigECamera::TriggerMode_On ); camera.TriggerActivation.SetValue( Basler_GigECamera::TriggerActivation_RisingEdge ); if( frame_trigger == "encoder" ) { // todo: make configurable camera.ShaftEncoderModuleLineSelector.SetValue( Basler_GigECamera::ShaftEncoderModuleLineSelector_PhaseA ); camera.ShaftEncoderModuleLineSource.SetValue( Basler_GigECamera::ShaftEncoderModuleLineSource_Line1 ); camera.ShaftEncoderModuleLineSelector.SetValue( Basler_GigECamera::ShaftEncoderModuleLineSelector_PhaseB ); 
camera.ShaftEncoderModuleLineSource.SetValue( Basler_GigECamera::ShaftEncoderModuleLineSource_Line2 ); camera.ShaftEncoderModuleCounterMode.SetValue( Basler_GigECamera::ShaftEncoderModuleCounterMode_FollowDirection ); camera.ShaftEncoderModuleMode.SetValue( Basler_GigECamera::ShaftEncoderModuleMode_ForwardOnly ); camera.ShaftEncoderModuleCounterMax.SetValue( encoder_ticks - 1 ); /// @todo compensate for mechanical jitter, if needed /// see Runner_Users_manual.pdf, 8.3, Case 2 camera.ShaftEncoderModuleReverseCounterMax.SetValue( 0 ); camera.ShaftEncoderModuleCounterReset.Execute(); camera.ShaftEncoderModuleReverseCounterReset.Execute(); } } } GenApi::IEnumEntry* lineStart = camera.TriggerSelector.GetEntry( Basler_GigECamera::TriggerSelector_LineStart ); if( lineStart && GenApi::IsAvailable( lineStart ) ) { if( line_trigger.empty() ) { camera.TriggerSelector.SetValue( Basler_GigECamera::TriggerSelector_LineStart ); camera.TriggerMode.SetValue( Basler_GigECamera::TriggerMode_Off ); } else { camera.TriggerSelector.SetValue( Basler_GigECamera::TriggerSelector_LineStart ); camera.TriggerMode.SetValue( Basler_GigECamera::TriggerMode_On ); Basler_GigECamera::TriggerSourceEnums t; if( line_trigger == "line1" ) { camera.TriggerSource.SetValue( Basler_GigECamera::TriggerSource_Line1 ); } else if( line_trigger == "line2" ) { camera.TriggerSource.SetValue( Basler_GigECamera::TriggerSource_Line2 ); } else if( line_trigger == "line3" ) { camera.TriggerSource.SetValue( Basler_GigECamera::TriggerSource_Line3 ); } else if( line_trigger == "encoder" ) { camera.TriggerSource.SetValue( Basler_GigECamera::TriggerSource_ShaftEncoderModuleOut ); } else { std::cerr << "basler-cat: line trigger '" << line_trigger << "' not implemented or invalid" << std::endl; return 1; } camera.TriggerActivation.SetValue( Basler_GigECamera::TriggerActivation_RisingEdge ); camera.TriggerSelector.SetValue( Basler_GigECamera::TriggerSelector_LineStart ); camera.TriggerMode.SetValue( 
Basler_GigECamera::TriggerMode_On ); camera.TriggerActivation.SetValue( Basler_GigECamera::TriggerActivation_RisingEdge ); } } if( chunk_mode ) { std::cerr << "basler-cat: setting chunk mode..." << std::endl; if( !GenApi::IsWritable( camera.ChunkModeActive ) ) { std::cerr << "basler-cat: camera does not support chunk features" << std::endl; camera.Close(); return 1; } camera.ChunkModeActive.SetValue( true ); camera.ChunkSelector.SetValue( Basler_GigECameraParams::ChunkSelector_Framecounter ); camera.ChunkEnable.SetValue( true ); camera.ChunkSelector.SetValue( Basler_GigECameraParams::ChunkSelector_Timestamp ); camera.ChunkEnable.SetValue( true ); camera.ChunkSelector.SetValue( Basler_GigECameraParams::ChunkSelector_LineTriggerIgnoredCounter ); camera.ChunkEnable.SetValue( true ); camera.ChunkSelector.SetValue( Basler_GigECameraParams::ChunkSelector_FrameTriggerIgnoredCounter ); camera.ChunkEnable.SetValue( true ); camera.ChunkSelector.SetValue( Basler_GigECameraParams::ChunkSelector_LineTriggerEndToEndCounter ); camera.ChunkEnable.SetValue( true ); camera.ChunkSelector.SetValue( Basler_GigECameraParams::ChunkSelector_FrameTriggerCounter ); camera.ChunkEnable.SetValue( true ); camera.ChunkSelector.SetValue( Basler_GigECameraParams::ChunkSelector_FramesPerTriggerCounter ); camera.ChunkEnable.SetValue( true ); parser = camera.CreateChunkParser(); if( !parser ) { std::cerr << "basler-cat: failed to create chunk parser" << std::endl; camera.Close(); return 1; } std::cerr << "basler-cat: set chunk mode" << std::endl; } camera.ExposureMode.SetValue( Basler_GigECamera::ExposureMode_Timed ); if( vm.count( "exposure" ) ) { camera.ExposureTimeRaw.SetValue( exposure ); } // todo? auto exposure (see ExposureAutoEnums) if( vm.count( "gain" ) ) { camera.GainSelector.SetValue( Basler_GigECamera::GainSelector_All ); camera.GainRaw.SetValue( gain ); if( channels == 3 ) // todo: make configurable; also is not setting all not enough? 
{ camera.GainSelector.SetValue( Basler_GigECamera::GainSelector_Red ); camera.GainRaw.SetValue( gain ); camera.GainSelector.SetValue( Basler_GigECamera::GainSelector_Green ); camera.GainRaw.SetValue( gain ); camera.GainSelector.SetValue( Basler_GigECamera::GainSelector_Blue ); camera.GainRaw.SetValue( gain ); } } if( vm.count( "line-rate" ) ) { camera.AcquisitionLineRateAbs.SetValue( line_rate ); } if( vm.count( "test-colour" ) ) { camera.TestImageSelector.SetValue( Basler_GigECamera::TestImageSelector_Testimage6 ); } else { camera.TestImageSelector.SetValue( Basler_GigECamera::TestImageSelector_Off ); } unsigned int payload_size = camera.PayloadSize.GetValue(); if( verbose ) { std::cerr << "basler-cat: camera mtu size: " << camera.GevSCPSPacketSize.GetValue() << std::endl; std::cerr << "basler-cat: exposure: " << camera.ExposureTimeRaw.GetValue() << std::endl; std::cerr << "basler-cat: payload size: " << payload_size << std::endl; } std::vector< std::vector< char > > buffers( 2 ); // todo? make number of buffers configurable for( std::size_t i = 0; i < buffers.size(); ++i ) { buffers[i].resize( payload_size ); } grabber.MaxBufferSize.SetValue( buffers[0].size() ); grabber.SocketBufferSize.SetValue( 127 ); if( verbose ) { std::cerr << "basler-cat: socket buffer size: " << grabber.SocketBufferSize.GetValue() << std::endl; std::cerr << "basler-cat: max buffer size: " << grabber.MaxBufferSize.GetValue() << std::endl; } grabber.MaxNumBuffer.SetValue( buffers.size() ); // todo: use --buffer value for number of buffered images grabber.PrepareGrab(); // image size now must not be changed until FinishGrab() is called. 
std::vector< Pylon::StreamBufferHandle > buffer_handles( buffers.size() ); for( std::size_t i = 0; i < buffers.size(); ++i ) { buffer_handles[i] = grabber.RegisterBuffer( &buffers[i][0], buffers[i].size() ); grabber.QueueBuffer( buffer_handles[i], NULL ); } if( chunk_mode ) { snark::tbb::bursty_reader< ChunkPair > read( boost::bind( &capture_< ChunkPair >, boost::ref( camera ), boost::ref( grabber ) ), discard ); tbb::filter_t< ChunkPair, void > write( tbb::filter::serial_in_order, boost::bind( &write_, _1 ) ); snark::tbb::bursty_pipeline< ChunkPair > pipeline; camera.AcquisitionMode.SetValue( Basler_GigECamera::AcquisitionMode_Continuous ); camera.AcquisitionStart.Execute(); // continuous acquisition mode if( verbose ) { std::cerr << "basler-cat: running in chunk mode..." << std::endl; } pipeline.run( read, write ); if( verbose ) { std::cerr << "basler-cat: shutting down..." << std::endl; } camera.AcquisitionStop(); camera.DestroyChunkParser( parser ); } else { snark::cv_mat::serialization serialization( options ); snark::tbb::bursty_reader< Pair > reader( boost::bind( &capture_< Pair >, boost::ref( camera ), boost::ref( grabber ) ), discard ); snark::imaging::applications::pipeline pipeline( serialization, filters, reader ); camera.AcquisitionMode.SetValue( Basler_GigECamera::AcquisitionMode_Continuous ); camera.AcquisitionStart.Execute(); // continuous acquisition mode if( verbose ) { std::cerr << "basler-cat: running..." << std::endl; } pipeline.run(); if( verbose ) { std::cerr << "basler-cat: shutting down..." 
<< std::endl; } camera.AcquisitionStop(); } if( verbose ) { std::cerr << "basler-cat: acquisition stopped" << std::endl; } is_shutdown = true; while( !done ) { boost::thread::sleep( boost::posix_time::microsec_clock::universal_time() + boost::posix_time::milliseconds( 100 ) ); } grabber.FinishGrab(); Pylon::GrabResult result; while( grabber.RetrieveResult( result ) ); // get all buffers back for( std::size_t i = 0; i < buffers.size(); ++i ) { grabber.DeregisterBuffer( buffer_handles[i] ); } grabber.Close(); camera.Close(); if( verbose ) { std::cerr << "basler-cat: done" << std::endl; } return 0; } catch( std::exception& ex ) { std::cerr << "basler-cat: " << ex.what() << std::endl; } catch( ... ) { std::cerr << "basler-cat: unknown exception" << std::endl; } return 1; }