void HTTPTracker::onScrapeResult(KIO::Job* j)
{
	if (j->error())
	{
		Out(SYS_TRK|LOG_IMPORTANT) << "Scrape failed : " << j->errorString() << endl;
		return;
	}

	// decode the bencoded scrape response
	KIO::StoredTransferJob* st = (KIO::StoredTransferJob*)j;
	BDecoder dec(st->data(),false,0);
	BNode* n = 0;
	try
	{
		n = dec.decode();
	}
	catch (bt::Error & err)
	{
		Out(SYS_TRK|LOG_IMPORTANT) << "Invalid scrape data " << err.toString() << endl;
		return;
	}

	if (n && n->getType() == BNode::DICT)
	{
		BDictNode* d = (BDictNode*)n;
		// the "files" dictionary maps info hashes onto scrape data
		d = d->getDict("files");
		if (d)
		{
			d = d->getDict(tor->getInfoHash().toByteArray());
			if (d)
			{
				// "complete" is the number of seeders, "incomplete" the number of leechers
				BValueNode* vn = d->getValue("complete");
				if (vn && vn->data().getType() == Value::INT)
				{
					seeders = vn->data().toInt();
				}

				vn = d->getValue("incomplete");
				if (vn && vn->data().getType() == Value::INT)
				{
					leechers = vn->data().toInt();
				}

				Out(SYS_TRK|LOG_DEBUG) << "Scrape : leechers = " << leechers
						<< ", seeders = " << seeders << endl;
			}
		}
	}
	delete n;
}
void UTPex::handlePexPacket(const Uint8* packet,Uint32 size)
{
	// packet[1] must be the ut_pex extended message ID (1), the bencoded payload starts at offset 2
	if (size <= 2 || packet[1] != 1)
		return;

	// wrap the packet in a QByteArray without copying it
	QByteArray tmp;
	tmp.setRawData((const char*)packet,size);
	BNode* node = 0;
	try
	{
		BDecoder dec(tmp,false,2);
		node = dec.decode();
		if (node && node->getType() == BNode::DICT)
		{
			BDictNode* dict = (BDictNode*)node;

			// ut_pex packet, emit signal to notify PeerManager
			BValueNode* val = dict->getValue("added");
			if (val)
			{
				QByteArray data = val->data().toByteArray();
				peer->emitPex(data);
			}
		}
	}
	catch (...)
	{
		// just ignore invalid packets
		Out(SYS_CON|LOG_DEBUG) << "Invalid extended packet" << endl;
	}
	delete node;
	tmp.resetRawData((const char*)packet,size);
}
void HTTPTracker::onScrapeResult(KJob* j)
{
	if (j->error())
	{
		Out(SYS_TRK | LOG_IMPORTANT) << "Scrape failed : " << j->errorString() << endl;
		return;
	}

	// decode the bencoded scrape response
	KIO::StoredTransferJob* st = (KIO::StoredTransferJob*)j;
	BDecoder dec(st->data(), false, 0);
	BNode* n = 0;
	try
	{
		n = dec.decode();
	}
	catch (bt::Error & err)
	{
		Out(SYS_TRK | LOG_IMPORTANT) << "Invalid scrape data " << err.toString() << endl;
		return;
	}

	if (n && n->getType() == BNode::DICT)
	{
		BDictNode* d = (BDictNode*)n;
		// the "files" dictionary maps info hashes onto scrape data
		d = d->getDict(QString("files"));
		if (d)
		{
			d = d->getDict(tds->infoHash().toByteArray());
			if (d)
			{
				try
				{
					seeders = d->getInt("complete");
					leechers = d->getInt("incomplete");
					total_downloaded = d->getInt("downloaded");
					// the presence of a "downloaders" field indicates support for the partial seed extension
					supports_partial_seed_extension = d->getValue("downloaders") != 0;
					Out(SYS_TRK | LOG_DEBUG) << "Scrape : leechers = " << leechers
							<< ", seeders = " << seeders << ", downloaded = " << total_downloaded << endl;
				}
				catch (...)
				{}
				scrapeDone();
				// a successful scrape clears a previous error state
				if (status == bt::TRACKER_ERROR)
				{
					status = bt::TRACKER_OK;
					failures = 0;
				}
			}
		}
	}
	delete n;
}
void TorrentGroup::load(bt::BDictNode* dn)
{
	name = QString::fromLocal8Bit(dn->getByteArray("name"));
	setIconByName(QString::fromLocal8Bit(dn->getByteArray("icon")));
	BListNode* ln = dn->getList("hashes");
	if (!ln)
		return;

	path = "/all/custom/" + name;
	// each entry in the list is a raw 20 byte SHA1 info hash
	for (Uint32 i = 0; i < ln->getNumChildren(); i++)
	{
		QByteArray ba = ln->getByteArray(i);
		if (ba.size() != 20)
			continue;

		hashes.insert(SHA1Hash((const Uint8*)ba.data()));
	}

	BDictNode* gp = dn->getDict(QString("policy"));
	if (gp)
	{
		// load the group policy
		if (gp->getValue("default_save_location"))
		{
			policy.default_save_location = gp->getString("default_save_location", 0);
			if (policy.default_save_location.length() == 0)
				policy.default_save_location = QString(); // make sure that 0 length strings are loaded as null strings
		}

		if (gp->getValue("default_move_on_completion_location"))
		{
			policy.default_move_on_completion_location = gp->getString("default_move_on_completion_location", 0);
			if (policy.default_move_on_completion_location.length() == 0)
				policy.default_move_on_completion_location = QString(); // make sure that 0 length strings are loaded as null strings
		}

		if (gp->getValue("max_share_ratio"))
			policy.max_share_ratio = gp->getString("max_share_ratio", 0).toFloat();

		if (gp->getValue("max_seed_time"))
			policy.max_seed_time = gp->getString("max_seed_time", 0).toFloat();

		if (gp->getValue("max_upload_rate"))
			policy.max_upload_rate = gp->getInt("max_upload_rate");

		if (gp->getValue("max_download_rate"))
			policy.max_download_rate = gp->getInt("max_download_rate");

		if (gp->getValue("only_apply_on_new_torrents"))
			policy.only_apply_on_new_torrents = gp->getInt("only_apply_on_new_torrents");
	}
}
bool HTTPTracker::updateData(const QByteArray & data)
{
//#define DEBUG_PRINT_RESPONSE
#ifdef DEBUG_PRINT_RESPONSE
	Out() << "Data : " << endl;
	Out() << QString(data) << endl;
#endif
	// search for the start of the bencoded dictionary, there might be random garbage in front of the data
	Uint32 i = 0;
	while (i < data.size())
	{
		if (data[i] == 'd')
			break;
		i++;
	}

	if (i == data.size())
	{
		failures++;
		requestFailed(i18n("Invalid response from tracker"));
		return false;
	}

	BDecoder dec(data,false,i);
	BNode* n = 0;
	try
	{
		n = dec.decode();
	}
	catch (...)
	{
		failures++;
		requestFailed(i18n("Invalid data from tracker"));
		return false;
	}

	if (!n || n->getType() != BNode::DICT)
	{
		failures++;
		requestFailed(i18n("Invalid response from tracker"));
		return false;
	}

	BDictNode* dict = (BDictNode*)n;
	if (dict->getData("failure reason"))
	{
		BValueNode* vn = dict->getValue("failure reason");
		QString msg = vn->data().toString();
		delete n;
		failures++;
		requestFailed(msg);
		return false;
	}

	BValueNode* vn = dict->getValue("interval");
	// if no interval is specified, use 5 minutes
	if (vn)
		interval = vn->data().toInt();
	else
		interval = 5 * 60;

	vn = dict->getValue("incomplete");
	if (vn)
		leechers = vn->data().toInt();

	vn = dict->getValue("complete");
	if (vn)
		seeders = vn->data().toInt();

	BListNode* ln = dict->getList("peers");
	if (!ln)
	{
		// no list, it might however be a compact response
		vn = dict->getValue("peers");
		if (!vn)
		{
			delete n;
			failures++;
			requestFailed(i18n("Invalid response from tracker"));
			return false;
		}

		// compact response : 6 bytes per peer, 4 for the IP and 2 for the port
		QByteArray arr = vn->data().toByteArray();
		for (Uint32 i = 0; i < arr.size(); i+=6)
		{
			Uint8 buf[6];
			for (int j = 0; j < 6; j++)
				buf[j] = arr[i + j];

			addPeer(QHostAddress(ReadUint32(buf,0)).toString(),ReadUint16(buf,4));
		}
	}
	else
	{
		// normal response : a list of dictionaries with an "ip" and "port" key
		for (Uint32 i = 0; i < ln->getNumChildren(); i++)
		{
			BDictNode* dict = dynamic_cast<BDictNode*>(ln->getChild(i));
			if (!dict)
				continue;

			BValueNode* ip_node = dict->getValue("ip");
			BValueNode* port_node = dict->getValue("port");
			if (!ip_node || !port_node)
				continue;

			addPeer(ip_node->data().toString(),port_node->data().toInt());
		}
	}
	delete n;
	return true;
}
bool HTTPTracker::updateData(const QByteArray & data)
{
//#define DEBUG_PRINT_RESPONSE
#ifdef DEBUG_PRINT_RESPONSE
	Out(SYS_TRK | LOG_DEBUG) << "Data : " << endl;
	Out(SYS_TRK | LOG_DEBUG) << QString(data) << endl;
#endif
	// search for the start of the bencoded dictionary, there might be random garbage in front of the data
	int i = 0;
	while (i < data.size())
	{
		if (data[i] == 'd')
			break;
		i++;
	}

	if (i == data.size())
	{
		failures++;
		failed(i18n("Invalid response from tracker"));
		return false;
	}

	BDecoder dec(data, false, i);
	BNode* n = 0;
	try
	{
		n = dec.decode();
	}
	catch (...)
	{
		failures++;
		failed(i18n("Invalid data from tracker"));
		return false;
	}

	if (!n || n->getType() != BNode::DICT)
	{
		failures++;
		failed(i18n("Invalid response from tracker"));
		return false;
	}

	BDictNode* dict = (BDictNode*)n;
	if (dict->getData("failure reason"))
	{
		BValueNode* vn = dict->getValue("failure reason");
		error = vn->data().toString();
		delete n;
		failures++;
		failed(error);
		return false;
	}

	if (dict->getData("warning message"))
	{
		BValueNode* vn = dict->getValue("warning message");
		warning = vn->data().toString();
	}
	else
		warning.clear();

	BValueNode* vn = dict->getValue("interval");
	// if no interval is specified, use 5 minutes
	if (vn)
		interval = vn->data().toInt();
	else
		interval = 5 * 60;

	vn = dict->getValue("incomplete");
	if (vn)
		leechers = vn->data().toInt();

	vn = dict->getValue("complete");
	if (vn)
		seeders = vn->data().toInt();

	BListNode* ln = dict->getList("peers");
	if (!ln)
	{
		// no list, it might however be a compact response
		vn = dict->getValue("peers");
		if (vn && vn->data().getType() == Value::STRING)
		{
			// compact response : 6 bytes per peer, 4 for the IPv4 address and 2 for the port
			QByteArray arr = vn->data().toByteArray();
			for (int i = 0;i < arr.size();i += 6)
			{
				Uint8 buf[6];
				for (int j = 0;j < 6;j++)
					buf[j] = arr[i + j];

				Uint32 ip = ReadUint32(buf, 0);
				addPeer(net::Address(ip, ReadUint16(buf, 4)), false);
			}
		}
	}
	else
	{
		// normal response : a list of dictionaries with an "ip" and "port" key
		for (Uint32 i = 0;i < ln->getNumChildren();i++)
		{
			BDictNode* dict = dynamic_cast<BDictNode*>(ln->getChild(i));
			if (!dict)
				continue;

			BValueNode* ip_node = dict->getValue("ip");
			BValueNode* port_node = dict->getValue("port");
			if (!ip_node || !port_node)
				continue;

			net::Address addr(ip_node->data().toString(), port_node->data().toInt());
			addPeer(addr, false);
		}
	}

	// Check for IPv6 compact peers : 18 bytes per peer, 16 for the address and 2 for the port
	vn = dict->getValue("peers6");
	if (vn && vn->data().getType() == Value::STRING)
	{
		QByteArray arr = vn->data().toByteArray();
		for (int i = 0;i < arr.size();i += 18)
		{
			Q_IPV6ADDR ip;
			memcpy(ip.c, arr.data() + i, 16);
			quint16 port = ReadUint16((const Uint8*)arr.data() + i, 16);
			addPeer(net::Address(ip, port), false);
		}
	}
	delete n;
	return true;
}