void GaduFileTransfer::updateFileInfo() { if (SocketNotifiers) { setFileSize(SocketNotifiers->fileSize()); setTransferredSize(SocketNotifiers->transferredFileSize()); } else { setFileSize(0); setTransferredSize(0); } // emit statusChanged(); }
//------------------------------------------------------------------------------ // Reinitializes ColBuf buffer, and resets // file offset data member attributes where new extent will start. //------------------------------------------------------------------------------ int ColumnInfoCompressed::resetFileOffsetsNewExtent(const char* hdr) { setFileSize( curCol.dataFile.hwm, false ); long long byteOffset = (long long)curCol.dataFile.hwm * (long long)BYTE_PER_BLOCK; fSizeWritten = byteOffset; fSizeWrittenStart = fSizeWritten; availFileSize = fileSize - fSizeWritten; // If we are adding an extent as part of preliminary block skipping, then // we won't have a ColumnBufferManager object yet, but that's okay, because // we are only adding the empty extent at this point. if (fColBufferMgr) { RETURN_ON_ERROR( fColBufferMgr->setDbFile(curCol.dataFile.pFile, curCol.dataFile.hwm, hdr) ); // Reinitialize ColBuf for the next extent long long startFileOffset; RETURN_ON_ERROR( fColBufferMgr->resetToBeCompressedColBuf( startFileOffset ) ); // Set the file offset to point to the chunk we are adding or updating RETURN_ON_ERROR( colOp->setFileOffset(curCol.dataFile.pFile, startFileOffset) ); } return NO_ERROR; }
// Point this image at a new URL.
// The cached file size is invalidated first; listeners are then notified
// with the (old, new) pair while m_url still holds the previous value, the
// member is updated, and the token cache is rebuilt.
void Image::setUrl(const QString &newUrl)
{
    setFileSize(0);
    emit urlChanged(m_url, newUrl);
    m_url = newUrl;
    refreshTokens();
}
/*
 * Build a fully-populated directory entry from the supplied metadata.
 *
 * Each field of a fresh directory_entry is filled in through its dedicated
 * setter helper; the populated struct is returned by value.
 */
directory_entry makeEntry(const char* nama, poi_attr_t atribut, JAM waktu,
                          TANGGAL tanggal, uint16_t idx, uint32_t size)
{
    directory_entry entry;

    setNama(&entry, nama);
    setattr(&entry, atribut);
    setLastModifTime(&entry, waktu);
    setLastModifDate(&entry, tanggal);
    setFirstDataBlockIdx(&entry, idx);
    setFileSize(&entry, size);

    return entry;
}
// Handle notification of a new incoming GG URL-based transfer.
// A FileTransfer record is created in the ReadyToDownload state and
// announced through the incomingFileTransfer() signal.  The size stays at
// zero because it is not known until the download actually begins.
void GaduFileTransferService::fileTransferReceived(Contact peer, QString downloadId, QString fileName)
{
    auto incoming = FileTransfer::create();

    incoming.setPeer(peer);
    incoming.setTransferDirection(FileTransferDirection::Incoming);
    incoming.setTransferType(FileTransferType::Url);
    incoming.setTransferStatus(FileTransferStatus::ReadyToDownload);
    incoming.setRemoteFileName(QUrl::fromPercentEncoding(fileName.toUtf8()));
    incoming.setFileSize(0); // we don't know file size yet

    // Persist the GG-specific identifiers alongside the transfer (Storable).
    incoming.addProperty("gg:downloadId", downloadId, CustomProperties::Storable);
    incoming.addProperty("gg:remoteFileName", fileName, CustomProperties::Storable);

    emit incomingFileTransfer(incoming);
}
// Attach the DCC socket notifiers that drive this transfer.
//
// A null pointer means the transfer could not be set up and is reported via
// socketNotAvailable().  Otherwise the remote file name, total size and
// progress are seeded from the notifiers, the notifiers and this transfer
// are cross-linked, and the status switches to StatusTransfer.
//
// Note: the original implementation called setRemoteFile() a second time,
// with the identical value, just before changing the status; that redundant
// call has been removed.
void GaduFileTransfer::setFileTransferNotifiers(DccSocketNotifiers *socketNotifiers)
{
    if (!socketNotifiers)
    {
        socketNotAvailable();
        return;
    }

    setRemoteFile(socketNotifiers->remoteFileName());
    setFileSize(socketNotifiers->fileSize());
    setTransferredSize(socketNotifiers->transferredFileSize());

    SocketNotifiers = socketNotifiers;
    // Track external destruction of the notifiers so the stored pointer is
    // never dereferenced after they are gone.
    connect(SocketNotifiers, SIGNAL(destroyed(QObject *)), this, SLOT(socketNotifiersDeleted()));
    SocketNotifiers->setGaduFileTransfer(this);
    WaitingForSocketNotifiers = false;

    changeFileTransferStatus(FileTransfer::StatusTransfer);
}
// React to an incoming XMPP stream-transfer offer.
// The sender is resolved (created if necessary) as a contact, a transfer
// record in the WaitingForAccept state is built from the job metadata, the
// matching stream handler is wired to the job, and the offer is announced.
void JabberFileTransferService::fileReceived(QXmppTransferJob *transferJob)
{
    auto const jid = Jid::parse(transferJob->jid());
    auto const peer = m_contactManager->byId(m_account, jid.bare(), ActionCreateAndAdd);

    auto transfer = m_fileTransferStorage->create();
    transfer.setPeer(peer);
    transfer.setTransferDirection(FileTransferDirection::Incoming);
    transfer.setTransferType(FileTransferType::Stream);
    transfer.setTransferStatus(FileTransferStatus::WaitingForAccept);
    transfer.setRemoteFileName(transferJob->fileName());
    transfer.setFileSize(transferJob->fileSize());

    // Without a handler the transfer cannot proceed; drop the offer here.
    if (!m_fileTransferHandlerManager->ensureHandler(transfer))
        return;

    auto handler = qobject_cast<JabberStreamIncomingFileTransferHandler *>(transfer.handler());
    if (handler)
        handler->setTransferJob(transferJob);

    emit incomingFileTransfer(transfer);
}
// Populate the editor with the metadata of a playable result.
//
// All track fields are copied into the form.  When the result lives in a
// local collection, the backing file's name and size are shown as well and
// the fields become editable.  Finally the prev/next buttons are enabled if
// the result can be located in the associated playlist interface.
//
// Changes: the locality test is evaluated once instead of twice, and the
// hard-coded length 7 (tied to "file://") is replaced by a named prefix so
// the two can no longer drift apart.
void MetadataEditor::loadResult( const Tomahawk::result_ptr& result )
{
    if ( result.isNull() )
        return;

    m_result = result;

    // Only results backed by a local collection can be edited / stat'ed.
    const bool isLocal = result->collection() && result->collection()->source()->isLocal();
    setEditable( isLocal );

    setTitle( result->track()->track() );
    setArtist( result->track()->artist() );
    setAlbum( result->track()->album() );
    setAlbumPos( result->track()->albumpos() );
    setDuration( result->track()->duration() );
    setYear( result->track()->year() );
    setBitrate( result->bitrate() );

    if ( isLocal )
    {
        QString furl = m_result->url();

        // Strip a leading "file://" scheme to obtain a plain filesystem path.
        const QString filePrefix( "file://" );
        if ( furl.startsWith( filePrefix ) )
            furl = furl.mid( filePrefix.length() );

        QFileInfo fi( furl );
        setFileName( fi.absoluteFilePath() );
        setFileSize( TomahawkUtils::filesizeToString( fi.size() ) );
    }

    setWindowTitle( result->track()->track() );

    if ( m_interface )
    {
        m_index = m_interface->indexOfResult( result );
        if ( m_index >= 0 )
            enablePushButtons();
    }
}
// Populate the editor from an unresolved query.
//
// If the query already has results the first one is loaded instead.  An
// unresolved query only carries track metadata, so the file-related fields
// are blanked and editing is disabled.
void MetadataEditor::loadQuery( const Tomahawk::query_ptr& query )
{
    if ( query.isNull() )
        return;

    // Prefer concrete result metadata whenever it is available.
    if ( query->numResults() > 0 )
    {
        loadResult( query->results().first() );
        return;
    }

    m_result = Tomahawk::result_ptr();
    m_query = query;

    setEditable( false );

    setTitle( query->track()->track() );
    setArtist( query->track()->artist() );
    setAlbum( query->track()->album() );
    setAlbumPos( query->track()->albumpos() );
    setDuration( query->track()->duration() );

    // No backing file: clear everything only a result could provide.
    setYear( 0 );
    setBitrate( 0 );
    setFileName( QString() );
    setFileSize( 0 );

    setWindowTitle( query->track()->track() );

    if ( m_interface )
    {
        m_index = m_interface->indexOfQuery( query );
        if ( m_index >= 0 )
            enablePushButtons();
    }
}
/*
 * Copy benchmark driver.
 *
 * Creates numberOp local source files, then for a series of file sizes
 * (stepping from minSize up to maxSize, multiplicatively when stepType is
 * 'x', otherwise additively) copies them into testDir and back, timing each
 * direction and printing throughput either human-readable or as CSV.
 *
 * Returns 1 on success, 0 on any failure.
 *
 * NOTE(review): depends on file-scope globals (totalSize, maxSize, minSize,
 * stepSize, stepType, numberOp, maxFile, testDir, csv, remount, doSleep,
 * toNull, createFileBefore, printCreateTime, dt) and helpers (createFile,
 * createDir, setFileSize, copyFile, copyNFile, readFile, sizeToName,
 * specToName, deleteTestDir, deleteFile, STARTTIME/ENDTIME/SETDT) defined
 * elsewhere in this file.
 */
int copy(void)
{
    int i;
    int nb;                  /* number of target files for the current size */
    char buf[4096];          /* scratch path buffer */
    char *s;                 /* short name of the current size */
    char *spec;              /* printable size/count spec for reports */
    int first = 1;           /* first pass of the size-stepping loop */
    off_t actSize = 0;       /* per-file size currently under test */
    int ret = 1;             /* 1 = success so far, 0 = failure */
    int result;

    /* Never test sizes beyond the total amount of data we may write. */
    if ( totalSize < maxSize ) {
        maxSize = totalSize;
    }

    /* generate source file on client and target dir on server */
    if (numberOp > 0 ) {
        for (i=0; i < numberOp;i++) {
            snprintf(buf,sizeof(buf),"sourceFile_%d",i);
            if ( createFile(buf) == 0)
                ret = 0;
        }
    } else {
        fprintf(stderr,"Wrong argument!\n");
        return 0;
    }

    if ( ret && createDir(testDir) )
        ret = 0;

    /* CSV output starts with a header row. */
    if ( ret && csv ) {
        printf("\"size\",\"write\",\"read\"\n");
    }

    /* One loop iteration per tested file size. */
    while (actSize <= maxSize && ret ) {
        /* Advance actSize: 'x' multiplies by stepSize, anything else adds. */
        if ( stepType == 'x' ) {
            if ( first ) {
                actSize = minSize;
                first = 0;
            } else {
                actSize *= stepSize;
            }
        } else {
            if ( first ) {
                actSize = minSize;
                first = 0;
            } else {
                actSize += stepSize;
            }
        }
        if ( actSize > maxSize )
            break;

        /* Number of files of this size that fit into totalSize, capped at
           maxFile.  NOTE(review): nb is left unchanged when actSize == 0 —
           presumably minSize > 0 guarantees this never happens; verify. */
        if ( actSize > 0 )
            nb = totalSize / actSize;
        if ( nb > maxFile )
            nb = maxFile;

        /* With parallel operations nb must be a multiple of numberOp. */
        if ( numberOp > 1 ) {
            int r = nb % numberOp;
            nb -= r;
            if ( nb == 0 ) {
                break;
            }
        }

        /* set size of source file(s) */
        for (i=0; i < numberOp;i++) {
            snprintf(buf,sizeof(buf),"sourceFile_%d",i);
            setFileSize(buf,actSize);
        }

        s = sizeToName(actSize);
        spec = specToName(actSize, nb, csv);

        /* create all target file */
        STARTTIME();
        for(i=0;i < nb &&createFileBefore;i++) {
            snprintf(buf,sizeof(buf), "%s/%s_%d",testDir,s,i);
            if ( createFile(buf) == 0 ) {
                ret = 0;
                break;
            }
        }
        if ( printCreateTime && createFileBefore) {
            ENDTIME();
            SETDT();
            int createPerSecond = nb*1000000/dt;
            fprintf(stdout,"Make %-18s time %8.3f sec %8d F/s\n",
                spec, (double)dt/1000000, createPerSecond );
        }
        if ( doSleep && createFileBefore) {
            sleep(doSleep);
        }

        /*************************/
        /* copy source to target */
        /*************************/
        STARTTIME();
        for ( i=0;i < nb ; i += numberOp ) {
            if ( numberOp == 1 ) {
                snprintf(buf,sizeof(buf), "%s/%s_%d",testDir,s,i);
                result = copyFile("sourceFile_0", buf);
            } else {
                snprintf(buf,sizeof(buf), "%s/%s_",testDir,s);
                result = copyNFile("sourceFile_", 0, buf, i, numberOp<nb?numberOp:nb);
            }
            if ( result == 0 ) {
                ret = 0;
                break;
            }
        }
        ENDTIME();
        SETDT();

        /* end of copy print out results */
        if ( ! csv )
            fprintf(stdout,"Write %-18s time %8.3f sec %12.3f MBps\n",
                spec, (double)dt/1000000.0,
                (double)(nb*actSize)/((double)dt/1000000.0)/(1024.0*1024.0) );
        else
            fprintf(stdout,"\"%s\",\"%.3f\",",
                spec,
                (double)(nb*actSize)/((double)dt/1000000.0)/(1024.0*1024.0) );

        /*******************/
        /* check file size */
        /*******************/
        for( i=0;i < nb; i++) {
            struct stat st;
            snprintf(buf,sizeof(buf), "%s/%s_%d", testDir,s, i);
            if (stat(buf,&st) == -1) {
                fprintf(stderr,"File %s not found\n",buf);
                ret = 0;
            } else {
                if ( st.st_size != actSize ) {
                    fprintf(stderr,"Wrong size (%llu) for file %s\n",st.st_size, buf);
                    ret = 0;
                }
            }
        }

        /* Optional remount command between the write and read phases —
           presumably to flush caches; verify against documentation. */
        if ( remount && ret ) {
            system(remount);
        }

        /*************************/
        /* copy target to source */
        /*************************/
        STARTTIME();
        for(i=0;i < nb;i += numberOp) {
            if ( numberOp == 1 ) {
                snprintf(buf,sizeof(buf), "%s/%s_%d",testDir,s,i);
                if ( toNull == 0 )
                    result = copyFile(buf, "sourceFile_0");
                else
                    result = readFile(buf, "sourceFile_0");
            } else {
                snprintf(buf,sizeof(buf), "%s/%s_",testDir,s);
                result = copyNFile(buf, i, "sourceFile_", 0, numberOp<nb?numberOp:nb);
            }
            if ( result == 0 ) {
                ret = 0;
                break;
            }
        }
        ENDTIME();

        /*******************/
        /* check file size */
        /*******************/
        for( i=0;i < numberOp && toNull == 0; i++) {
            struct stat st;
            snprintf(buf,sizeof(buf), "sourceFile_%d", i);
            if (stat(buf,&st) == -1) {
                fprintf(stderr,"File %s not found\n",buf);
                ret = 0;
            } else {
                if ( st.st_size != actSize ) {
                    fprintf(stderr,"Wrong size (%llu) for file %s\n",st.st_size, buf);
                    ret = 0;
                }
            }
        }

        /* print out results */
        SETDT();
        if ( ! csv )
            fprintf(stdout,"Read %-18s time %8.3f sec %12.3f MBps\n",
                spec, (double)dt/1000000,
                (double)(nb*actSize)/((double)dt/1000000)/(1024.0*1024.0) );
        else
            fprintf(stdout,"\"%.3f\"\n",
                (double)(nb*actSize)/((double)dt/1000000)/(1024.0*1024.0) );

        /* remove files from test directory */
        deleteTestDir(0);
    }

    /* cleanup */
    for (i=0; i < numberOp;i++) {
        snprintf(buf,sizeof(buf),"sourceFile_%d",i);
        deleteFile(buf);
    }
    deleteTestDir(1);

    return ret;
}
//------------------------------------------------------------------------------ // Prepare the initial compressed column segment file for import. //------------------------------------------------------------------------------ int ColumnInfoCompressed::setupInitialColumnFile( HWM oldHwm, HWM hwm ) { char hdr[ compress::ERYDBCompressInterface::HDR_BUF_LEN * 2 ]; RETURN_ON_ERROR( colOp->readHeaders(curCol.dataFile.pFile, hdr) ); // Initialize the output buffer manager for the column. WriteEngine::ColumnBufferManager *mgr; if (column.colType == COL_TYPE_DICT) { mgr = new ColumnBufferManagerDctnry( this, 8, fLog, column.compressionType); RETURN_ON_ERROR( mgr->setDbFile(curCol.dataFile.pFile, hwm, hdr) ); } else { mgr = new ColumnBufferManager( this, column.width, fLog, column.compressionType); RETURN_ON_ERROR( mgr->setDbFile(curCol.dataFile.pFile, hwm, hdr) ); } fColBufferMgr = mgr; ERYDBCompressInterface compressor; int abbrevFlag = ( compressor.getBlockCount(hdr) == uint64_t(INITIAL_EXTENT_ROWS_TO_DISK*column.width/BYTE_PER_BLOCK) ); setFileSize( hwm, abbrevFlag ); // See if dealing with abbreviated extent that will need expanding. // This only applies to the first extent of the first segment file. setAbbrevExtentCheck(); // If we are dealing with initial extent, see if block skipping has // exceeded disk allocation, in which case we expand to a full extent. if (isAbbrevExtent()) { unsigned int numBlksForFirstExtent = (INITIAL_EXTENT_ROWS_TO_DISK*column.width) / BYTE_PER_BLOCK; if ( ((oldHwm+1) <= numBlksForFirstExtent) && ((hwm+1 ) > numBlksForFirstExtent) ) { RETURN_ON_ERROR( expandAbbrevExtent(false) ); } } // Store the current allocated file size in availFileSize. // Keep in mind, these are raw uncompressed offsets. 
// NOTE: We don't call setFileOffset() to set the file position in the // column segment file at this point; we wait till we load the compressed // buffer later on in ColumnBufferCompressed::initToBeCompressedBuffer() long long byteOffset = (long long)hwm * (long long)BYTE_PER_BLOCK; fSizeWritten = byteOffset; fSizeWrittenStart = fSizeWritten; availFileSize = fileSize - fSizeWritten; if (fLog->isDebug( DEBUG_1 )) { std::ostringstream oss; oss << "Init raw data offsets in compressed column file OID-" << curCol.dataFile.fid << "; DBRoot-" << curCol.dataFile.fDbRoot << "; part-" << curCol.dataFile.fPartition << "; seg-" << curCol.dataFile.fSegment << "; abbrev-" << abbrevFlag << "; begByte-"<< fSizeWritten << "; endByte-"<< fileSize << "; freeBytes-" << availFileSize; fLog->logMsg( oss.str(), MSGLVL_INFO2 ); } return NO_ERROR; }
// Slot run when the details-page network request has finished.  Parses the
// response with the site's details API and merges the results (tags, pools,
// creation date, direct image URL, rating) into this image, then signals
// finishedLoadingTags().
void Image::parseDetails()
{
    m_loadingDetails = false;

    // Aborted
    if (m_loadDetails->error() == QNetworkReply::OperationCanceledError)
    {
        m_loadDetails->deleteLater();
        m_loadDetails = nullptr;
        return;
    }

    // Check redirection
    QUrl redir = m_loadDetails->attribute(QNetworkRequest::RedirectionTargetAttribute).toUrl();
    if (!redir.isEmpty())
    {
        m_pageUrl = redir;
        loadDetails();
        return;
    }

    // HTTP 429: the server rate-limited us; schedule another attempt.
    int statusCode = m_loadDetails->attribute(QNetworkRequest::HttpStatusCodeAttribute).toInt();
    if (statusCode == 429)
    {
        log(QStringLiteral("Details limit reached (429). New try."));
        loadDetails(true);
        return;
    }

    QString source = QString::fromUtf8(m_loadDetails->readAll());

    // Get an api able to parse details
    Api *api = m_parentSite->detailsApi();
    if (api == Q_NULLPTR)
        return;
    // NOTE(review): on the early return above, m_loadDetails is neither
    // deleted nor cleared — confirm whether that leaks the reply object.

    // Parse source
    ParsedDetails ret = api->parseDetails(source, m_parentSite);
    if (!ret.error.isEmpty())
    {
        log(QStringLiteral("[%1][%2] %3").arg(m_parentSite->url(), api->getName(), ret.error), Logger::Warning);
        emit finishedLoadingTags();
        return;
    }

    // Fill data from parsing result
    if (!ret.pools.isEmpty())
    {
        m_pools = ret.pools;
    }
    if (!ret.tags.isEmpty())
    {
        m_tags = ret.tags;
    }
    if (ret.createdAt.isValid())
    {
        m_createdAt = ret.createdAt;
    }

    // Image url
    if (!ret.imageUrl.isEmpty())
    {
        QString before = m_url;

        QUrl newUrl = m_parentSite->fixUrl(ret.imageUrl, before);
        m_url = newUrl.toString();
        m_fileUrl = newUrl;

        if (before != m_url)
        {
            // A different URL invalidates the old extension guesses and the
            // cached file size.
            delete m_extensionRotator;
            m_extensionRotator = nullptr;
            setFileSize(0);
            emit urlChanged(before, m_url);
        }
    }

    // Get rating from tags
    if (m_rating.isEmpty())
    {
        int ratingTagIndex = -1;
        for (int it = 0; it < m_tags.count(); ++it)
        {
            if (m_tags[it].type().name() == "rating")
            {
                m_rating = m_tags[it].text();
                ratingTagIndex = it;
                break;
            }
        }
        // The rating tag is consumed: it is removed from the tag list once
        // its value has been copied into m_rating.
        if (ratingTagIndex != -1)
        {
            m_tags.removeAt(ratingTagIndex);
        }
    }

    m_loadDetails->deleteLater();
    m_loadDetails = nullptr;
    m_loadedDetails = true;

    refreshTokens();
    emit finishedLoadingTags();
}