void AudioOutputSpeech::addFrameToBuffer(const QByteArray &qbaPacket, unsigned int iSeq) { QMutexLocker lock(&qmJitter); if (qbaPacket.size() < 2) return; PacketDataStream pds(qbaPacket); // skip flags pds.next(); int samples = 0; if (umtType == MessageHandler::UDPVoiceOpus) { int size; pds >> size; size &= 0x1fff; const QByteArray &qba = pds.dataBlock(size); const unsigned char *packet = reinterpret_cast<const unsigned char*>(qba.constData()); #ifdef USE_OPUS int frames = opus_packet_get_nb_frames(packet, size); samples = frames * opus_packet_get_samples_per_frame(packet, SAMPLE_RATE); #else return; #endif // We can't handle frames which are not a multiple of 10ms. Q_ASSERT(samples % iFrameSize == 0); } else {
// Save the prepared API information. bool QsciAPIs::savePrepared(const QString &filename) const { QString pname = prepName(filename, true); if (pname.isEmpty()) return false; // Write the prepared data to a memory buffer. QByteArray pdata; QDataStream pds(&pdata, QIODevice::WriteOnly); // Use a serialisation format supported by Qt v3.0 and later. pds.setVersion(QDataStream::Qt_3_0); pds << PreparedDataFormatVersion; pds << lexer()->lexer(); pds << prep->wdict; pds << prep->raw_apis; // Compress the data and write it. QFile pf(pname); if (!pf.open(QIODevice::WriteOnly|QIODevice::Truncate)) return false; if (pf.write(qCompress(pdata)) < 0) { pf.close(); return false; } pf.close(); return true; }
/**
 * Reacts to the commands given by the user: the command line is split into
 * the command name and its arguments, which are then dispatched to the
 * function associated with each command.
 *
 * @param linha String holding the line read from the shell.
 * @param DIM   Board dimension.
 * @param Tab   Game board.
 * @returns an integer indicating whether or not the command succeeded.
 */
int executa_comando(char *linha, int *DIM, Elem **Tab)
{
    char cmd[1025];
    char args[1025];
    int nargs = sscanf(linha, "%s %[^\n]", cmd, args);

    /* Fix: on an empty/blank line sscanf matches nothing and cmd stays
       uninitialised, so comparing it below would be undefined behaviour. */
    if (nargs < 1)
        return mensagem_de_erro(E_COMMAND);

    /* Fix: argument-taking commands could otherwise read args
       uninitialised when no argument was supplied. */
    if (nargs < 2)
        args[0] = '\0';

    /* Commands that take arguments. */
    if (strcmp(cmd, "b") == 0 || strcmp(cmd, "p") == 0 || strcmp(cmd, "i") == 0)
        return executaJogada(args, cmd, DIM, Tab);
    if (strcmp(cmd, "cr") == 0)
        return executaCriaJogo(args, DIM, Tab);
    if (strcmp(cmd, "gr") == 0)
        return executaGravaJogo(args, DIM, Tab);

    /* Argument-less commands (rejected when extra arguments are present). */
    if (strcmp(cmd, "?") == 0 && nargs == 1)
        return ajuda();
    if (strcmp(cmd, "trp") == 0 && nargs == 1)
        return trp(Tab, DIM);
    if (strcmp(cmd, "snd") == 0 && nargs == 1)
        return snd(Tab, DIM);
    if (strcmp(cmd, "pis") == 0 && nargs == 1)
        return pis(Tab, DIM);
    if (strcmp(cmd, "pds") == 0 && nargs == 1)
        return pds(Tab, DIM);
    if (strcmp(cmd, "vb") == 0 && nargs == 1)
        return vb(Tab, DIM);
    if (strcmp(cmd, "vp") == 0 && nargs == 1)
        return vp(Tab, DIM);
    if (strcmp(cmd, "vl") == 0 && nargs == 1)
        return vl(Tab, DIM);
    if (strcmp(cmd, "q") == 0)
        exit(0);

    return mensagem_de_erro(E_COMMAND);
}
// Queue one encoded frame and, once enough frames are buffered (or a
// terminator arrives), flush them as a single voice packet.
//
// frame      - the freshly encoded audio frame to queue.
// terminator - true when this frame ends a transmission; forces a flush.
void AudioInput::flushCheck(const QByteArray &frame, bool terminator) {
	qlFrames << frame;

	// Keep buffering until iAudioFrames frames are queued, unless terminating.
	if (! terminator && iBufferedFrames < iAudioFrames)
		return;

	// Header byte: target in the low 5 bits, codec type in the top 3.
	int flags = g.iTarget;
	if (terminator)
		flags = g.iPrevTarget;
	if (g.s.lmLoopMode == Settings::Server)
		flags = 0x1f; // Server loopback
	flags |= (umtType << 5);

	char data[1024];
	data[0] = static_cast<unsigned char>(flags);

	int frames = iBufferedFrames;
	iBufferedFrames = 0;

	// Payload is written after the single header byte.
	PacketDataStream pds(data + 1, 1023);

	// Sequence number
	pds << iFrameCounter - frames;

	if (umtType == MessageHandler::UDPVoiceOpus) {
		// Opus: a single variable-length frame; bit 13 of the size field
		// marks the end of a transmission.
		const QByteArray &qba = qlFrames.takeFirst();
		int size = qba.size();
		if (terminator)
			size |= 1 << 13;
		pds << size;
		pds.append(qba.constData(), qba.size());
	} else {
		// Legacy codecs: each frame is prefixed with a length byte whose
		// high bit flags "more frames follow"; a terminator is encoded as
		// a trailing zero-length frame.
		if (terminator) {
			qlFrames << QByteArray();
			++frames;
		}
		for (int i = 0; i < frames; ++i) {
			const QByteArray &qba = qlFrames.takeFirst();
			unsigned char head = static_cast<unsigned char>(qba.size());
			if (i < frames - 1)
				head |= 0x80;
			pds.append(head);
			pds.append(qba.constData(), qba.size());
		}
	}

	// Optionally append the speaker's 3-D position.
	if (g.s.bTransmitPosition && g.p && ! g.bCenterPosition && g.p->fetch()) {
		pds << g.p->fPosition[0];
		pds << g.p->fPosition[1];
		pds << g.p->fPosition[2];
	}

	sendAudioFrame(data, pds);

	Q_ASSERT(qlFrames.isEmpty());
}
// ========================================================================= Pruning_table_ds_el:: Pruning_table_ds_el(Transform_table *tr, Turn_table *tn, int e1): e1(e1) // ------------------------------------------------------------------------- { int n, d, mx = 0; int ds, el, t; unsigned int sm = 0; Pack_dse pds(e1); int Nds = pds.len(); int Sds = pds.startlen(); Pack_el pel(e1); int Nel = pel.len(); int Sel = pel.startlen(); for (ds = 0; ds < Nds; ds++) for (el = 0; el < Nel; el++) a[ds][el] = BIG; for (t = 0; t < A_N_TW; t++) if (mx < tn->a_len(t)) mx = tn->a_len(t); fprintf(stderr, "\n - downslice edge positions - middle edge locations (phase 1, %i x %i goal/s):\n", Sds, Sel); for (ds = 0; ds < Sds; ds++) for (el = 0; el < Sel; el++) a[pds.start(ds)][pel.start(el)] = 0; d = 0; n = Sds * Sel; while (n) { fprintf(stderr," %2i %8i %10u\n", d, n, sm += n); n = 0; d++; for (ds = 0; ds < Nds; ds++) { for (el = 0; el < Nel; el++) { if (d <= a[ds][el] || d > a[ds][el] + mx) continue; for (int t = 0; t < A_N_TW; t++) { if (d == a[ds][el] + tn->a_len(t)) { int nds = tr->se->a_do_d_tw(t, ds); int nel = tr->el->a_do_tw(t, el); if (a[nds][nel] > d) { a[nds][nel] = d; n++; } } } } } } }
// Queue one encoded frame and, once iAudioFrames frames are buffered (or a
// terminator arrives), flush them as a single voice packet to the server,
// the local loopback, and/or an active voice recorder.
void AudioInput::flushCheck(const QByteArray &frame, bool terminator) {
	qlFrames << frame;
	if (! terminator && qlFrames.count() < iAudioFrames)
		return;

	// Header byte: target in the low 5 bits, codec type in the top 3.
	int flags = g.iTarget;
	if (terminator)
		flags = g.iPrevTarget;
	if (g.s.lmLoopMode == Settings::Server)
		flags = 0x1f; // Server loopback
	flags |= (umtType << 5);

	char data[1024];
	data[0] = static_cast<unsigned char>(flags);
	PacketDataStream pds(data + 1, 1023);

	// Sequence number of the first frame in this packet (computed before
	// the terminator frame is appended).
	pds << iFrameCounter - qlFrames.count();

	// A terminator is encoded as a trailing zero-length frame.
	if (terminator)
		qlFrames << QByteArray();

	// Each frame is prefixed with a length byte; the high bit flags that
	// more frames follow.
	for (int i=0;i<qlFrames.count(); ++i) {
		const QByteArray &qba = qlFrames.at(i);
		unsigned char head = static_cast<unsigned char>(qba.size());
		if (i < qlFrames.count() - 1)
			head |= 0x80;
		pds.append(head);
		pds.append(qba.constData(), qba.size());
	}

	// Optionally append the speaker's 3-D position.
	if (g.s.bTransmitPosition && g.p && ! g.bCenterPosition && g.p->fetch()) {
		pds << g.p->fPosition[0];
		pds << g.p->fPosition[1];
		pds << g.p->fPosition[2];
	}

	// Hand a copy of the finished packet (header byte + payload) to an
	// active voice recorder, if any.
	ServerHandlerPtr sh = g.sh;
	if (sh) {
		VoiceRecorderPtr recorder(sh->recorder);
		if (recorder)
			recorder->getRecordUser().addFrame(QByteArray(data, pds.size() + 1));
	}

	// Local loopback gets a copy; otherwise the packet goes to the server.
	if (g.s.lmLoopMode == Settings::Local)
		LoopUser::lpLoopy.addFrame(QByteArray(data, pds.size() + 1));
	else if (sh)
		sh->sendMessage(data, pds.size() + 1);

	qlFrames.clear();
}
void AudioInput::flushCheck(const QByteArray &frame, bool terminator) { qlFrames.push_back(frame); if (! terminator && qlFrames.size() < iAudioFrames) return; int flags = g_struct.iTarget; if (terminator) flags = g_struct.iPrevTarget; flags |= (umtType << 5); char data[1024]; data[0] = static_cast<unsigned char>(flags); PacketDataStream pds(data + 1, 1023); pds << iFrameCounter - qlFrames.size(); if (terminator) qlFrames.push_back(QByteArray()); for (int i=0;i<qlFrames.size(); ++i) { const QByteArray &qba = qlFrames.at(i); unsigned char head = static_cast<unsigned char>(qba.size()); if (i < qlFrames.size() - 1) head |= 0x80; pds.append(head); pds.append((char*)(&qba[0]), qba.size()); } //test //g_struct.s.lmLoopMode = Settings::Local; if (g_struct.s.lmLoopMode == Settings::Local) { //如果是本地预览声音,则添加到本地预览声音缓存 QByteArray qba; for(int i=0; i<pds.size()+1; i++) { qba.push_back(data[i]); } LoopUser::lpLoopy.addFrame(qba); } else if (g_struct.trans) { //获得到数据,准备发送 123+1 g_struct.trans->SendAudioData(data,pds.size() + 1); } qlFrames.clear(); }
void MumbleClient::processIncomingAudioPacket(quint8 *data, quint64 size, quint8 type) { PacketDataStream pds(data+1, size-1); quint64 seq_number; quint64 session; quint8 audio_head; pds >> session; pds >> seq_number; pds >> audio_head; //audio_head &= 0x1fff; if(type == 1) // Received UDPTunnel { type = audio_head >> 5; }
// Push one incoming voice packet into the jitter buffer.
//
// qbaPacket - raw voice packet: a flags byte followed by length-prefixed
//             frames (high bit of each header byte = "more frames follow").
// iSeq      - sequence number of the first frame in the packet.
void AudioOutputSpeech::addFrameToBuffer(const QByteArray &qbaPacket, unsigned int iSeq) {
	QMutexLocker lock(&qmJitter);

	// Too short to hold even the flags byte plus one frame header.
	if (qbaPacket.size() < 2)
		return;

	PacketDataStream pds(qbaPacket);
	pds.next(); // skip the flags byte

	// Walk the length-prefixed frames to count how many this packet spans.
	int frames = 0;
	unsigned int header = 0;
	do {
		header = static_cast<unsigned char>(pds.next());
		frames++;
		pds.skip(header & 0x7f);
	} while ((header & 0x80) && pds.isValid());

	// Only a packet that parsed cleanly is queued.
	if (pds.isValid()) {
		JitterBufferPacket jbp;
		jbp.data = const_cast<char *>(qbaPacket.constData());
		jbp.len = qbaPacket.size();
		jbp.span = iFrameSize * frames;      // samples covered by the packet
		jbp.timestamp = iFrameSize * iSeq;   // position in the sample stream

#ifdef REPORT_JITTER
		// Optional jitter statistics, capped at 3000 records per user.
		if (g.s.bUsage && (umtType != MessageHandler::UDPVoiceSpeex) && p && ! p->qsHash.isEmpty() && (p->qlTiming.count() < 3000)) {
			QMutexLocker qml(& p->qmTiming);

			ClientUser::JitterRecord jr;
			jr.iSequence = iSeq;
			jr.iFrames = frames;
			jr.uiElapsed = p->tTiming.restart();

			// After the first record, store deltas relative to the
			// previous packet rather than absolute values.
			if (! p->qlTiming.isEmpty()) {
				jr.iFrames -= p->iFrames;
				jr.iSequence -= p->iSequence + p->iFrames;
			}
			p->iFrames = frames;
			p->iSequence = iSeq;

			p->qlTiming.append(jr);
		}
#endif
		jitter_buffer_put(jbJitter, &jbp);
	}
}
// Enumerate the ATA devices hanging off this controller: walk down to the
// primary IDE channel, then across to the secondary channel, registering the
// first child device of each.  Devices are heap-allocated; ownership is
// apparently handed to m_deviceList / m_ataDeviceList (freed elsewhere —
// TODO confirm).
//
// pDevInfoData - setup-API device information for the controller itself.
// Returns 1 (TRUE) in all cases, matching the original contract.
BOOL CGenericATAController::EnumerateDevices(PSP_DEVINFO_DATA pDevInfoData)
{
	CDeviceSetup controller(pDevInfoData);

	// Primary IDE channel is the controller's first child.  Fix: the
	// return value was previously ignored, so on failure the
	// uninitialised SP_DEVINFO_DATA was used.
	SP_DEVINFO_DATA pcDevInfoData;
	if (controller.GetChild(&pcDevInfoData) == 0)
		return 1;

	CDeviceSetup primaryChannel(pcDevInfoData);

	// Primary device on the primary channel.
	SP_DEVINFO_DATA pdDevInfoData;
	if (primaryChannel.GetChild(&pdDevInfoData) != 0)
	{
		CDeviceSetup pds(pdDevInfoData);
		CATADevice *device = new CGenericATADevice(pds.GetDevicePath().c_str(), *this);
		m_deviceList.push_back(device);
		m_ataDeviceList.push_back(device);
	}

	// Secondary IDE channel is the primary channel's sibling.  Fix: skip
	// it when absent instead of reading an uninitialised structure.
	SP_DEVINFO_DATA scDevInfoData;
	if (primaryChannel.GetSibling(&scDevInfoData) != 0)
	{
		CDeviceSetup secondaryChannel(scDevInfoData);

		// Secondary device on the secondary channel.
		SP_DEVINFO_DATA sdDevInfoData;
		if (secondaryChannel.GetChild(&sdDevInfoData) != 0)
		{
			CDeviceSetup sds(sdDevInfoData);
			CATADevice *device = new CGenericATADevice(sds.GetDevicePath().c_str(), *this);
			m_deviceList.push_back(device);
			m_ataDeviceList.push_back(device);
		}
	}

	return 1;
}
int main(int argc, char ** argv){ std::string outname; if(argc == 2){ outname = std::string(argv[1]); } else{ outname = "out.ply"; } std::vector<Eigen::Vector3d> points; PoissonDiskSampling pds(width, height, 1.0); UniformConverter converter(10.0); // distance between points should 10.0 pds.setConverter(converter); pds.sample(points); savePly("pds.ply", points); GeomRenderer gr(points); gr.render(width, height); gr.save("pds.png"); }
//从音频队列中取帧 void LoopUser::fetchFrames() { MutexLocker l(&qmLock); AudioOutputPtr ao = g_struct.ao; if (!ao || qmPackets.empty()) { return; } double cmp = qtTicker.elapsed(); std::multimap<float, QByteArray>::iterator i = qmPackets.begin(); while (i != qmPackets.end()) { // if (i->first > cmp) // { // char buf[512] = {0}; // sprintf(buf,"LoopUser::fetchFrames() cmp=%f\n",i->first ); // OutputDebugStringA(buf); // break; // } const QByteArray &data = i->second; PacketDataStream pds((char*)&data[0], data.size()); unsigned int msgFlags = 0; int iSeq = static_cast<unsigned int>(pds.next8()); QByteArray qba; pds.dataBlock(pds.left(), qba); ao->addFrameToBuffer(this, qba, iSeq, MessageHandler::UDPVoiceAACPlus/*MessageHandler::UDPVoiceCELT*/); i = qmPackets.erase(i); } qtLastFetch.restart(); }
void MumbleClient::createVoicePacket(unsigned char *encoded_audio, int packet_size) { int type = 0; if(_settings->_use_codec2) type |= (5 << 5); else type |= (4 << 5); int data_size = 1024; char data[data_size]; data[0] = static_cast<unsigned char>(type); PacketDataStream pds(data + 1, data_size-1); int nr_of_frames = opus_packet_get_nb_frames(encoded_audio,packet_size); // sequence? pds << _sequence_number; int real_packet_size = packet_size; _sequence_number +=nr_of_frames; //packet_size |= 1 << 13; pds << packet_size; char *audio_packet = reinterpret_cast<char*>(encoded_audio); pds.append(audio_packet,real_packet_size); unsigned char *bin_data = reinterpret_cast<unsigned char*>(data); if(_settings->_mumble_tcp) // TCP tunnel { this->sendMessage(bin_data,1,pds.size()+1); } else // Use UDP { this->sendUDPMessage(bin_data,pds.size()+1); } }
// Load the prepared API information. bool QsciAPIs::loadPrepared(const QString &filename) { QString pname = prepName(filename); if (pname.isEmpty()) return false; // Read the prepared data and decompress it. QFile pf(pname); if (!pf.open(QIODevice::ReadOnly)) return false; QByteArray cpdata = pf.readAll(); pf.close(); if (cpdata.count() == 0) return false; QByteArray pdata = qUncompress(cpdata); // Extract the data. QDataStream pds(pdata); unsigned char vers; pds >> vers; if (vers > PreparedDataFormatVersion) return false; char *lex_name; pds >> lex_name; if (qstrcmp(lex_name, lexer()->lexer()) != 0) { delete[] lex_name; return false; } delete[] lex_name; prep->wdict.clear(); pds >> prep->wdict; if (!lexer()->caseSensitive()) { // Build up the case dictionary. prep->cdict.clear(); QMap<QString, WordIndexList>::const_iterator it = prep->wdict.begin(); while (it != prep->wdict.end()) { prep->cdict[it.key().toUpper()] = it.key(); ++it; } } prep->raw_apis.clear(); pds >> prep->raw_apis; // Allow the raw API information to be modified. apis = prep->raw_apis; return true; }
bool AudioOutputSpeech::needSamples(unsigned int snum) { for (unsigned int i=iLastConsume;i<iBufferFilled;++i) pfBuffer[i-iLastConsume]=pfBuffer[i]; iBufferFilled -= iLastConsume; iLastConsume = snum; if (iBufferFilled >= snum) return bLastAlive; float *pOut; STACKVAR(float, fOut, iFrameSize + 4096); bool nextalive = bLastAlive; while (iBufferFilled < snum) { resizeBuffer(iBufferFilled + iOutputSize); pOut = (srs) ? fOut : (pfBuffer + iBufferFilled); if (! bLastAlive) { memset(pOut, 0, iFrameSize * sizeof(float)); } else { if (p == LoopUser::lpLoopy) { LoopUser::lpLoopy->fetchFrames(); } int avail = 0; int ts = jitter_buffer_get_pointer_timestamp(jbJitter); jitter_buffer_ctl(jbJitter, JITTER_BUFFER_GET_AVAILABLE_COUNT, &avail); if (p && (ts == 0)) { int want = iroundf(p->fAverageAvailable); if (avail < want) { ++iMissCount; if (iMissCount < 20) { memset(pOut, 0, iFrameSize * sizeof(float)); goto nextframe; } } } if (qlFrames.isEmpty()) { QMutexLocker lock(&qmJitter); char data[4096]; JitterBufferPacket jbp; jbp.data = data; jbp.len = 4096; spx_int32_t startofs = 0; if (jitter_buffer_get(jbJitter, &jbp, iFrameSize, &startofs) == JITTER_BUFFER_OK) { PacketDataStream pds(jbp.data, jbp.len); iMissCount = 0; ucFlags = static_cast<unsigned char>(pds.next()); bHasTerminator = false; unsigned int header = 0; do { header = static_cast<unsigned int>(pds.next()); if (header) qlFrames << pds.dataBlock(header & 0x7f); else bHasTerminator = true; } while ((header & 0x80) && pds.isValid()); if (pds.left()) { pds >> fPos[0]; pds >> fPos[1]; pds >> fPos[2]; } else { fPos[0] = fPos[1] = fPos[2] = 0.0f; } if (p) { float a = static_cast<float>(avail); if (avail >= p->fAverageAvailable) p->fAverageAvailable = a; else p->fAverageAvailable *= 0.99f; } } else {