Example #1
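/* evbuffer callback for a webseed task's content buffer: info->n_added is
   the number of bytes just appended, which gets charged to the webseed's
   download bandwidth; the first data also records the HTTP response code */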
static void
on_content_changed (struct evbuffer                * buf,
                    const struct evbuffer_cb_info  * info,
                    void                           * vtask)
{
  const size_t n_added = info->n_added;
  struct tr_webseed_task * task = vtask;
  tr_session * session = task->session;

  tr_sessionLock (session);

  /* only account for the new bytes if the task hasn't been cancelled */
  if (!task->dead && (n_added > 0))
    {
      uint32_t len;
      struct tr_webseed * w = task->webseed;

      tr_bandwidthUsed (&w->bandwidth, TR_DOWN, n_added, true, tr_time_msec ());
      fire_client_got_piece_data (w, n_added);
      len = evbuffer_get_length (buf);

      /* the first data we receive tells us the server's HTTP response code */
      if (!task->response_code)
        {
          tr_webGetTaskInfo (task->web_task, TR_WEB_GET_CODE, &task->response_code);

          /* 206 Partial Content: the server honored our range request */
          if (task->response_code == 206)
            {
              const char * url;
              struct connection_succeeded_data * data;

              url = NULL;
              tr_webGetTaskInfo (task->web_task, TR_WEB_GET_REAL_URL, &url);

              data = tr_new (struct connection_succeeded_data, 1);
              data->webseed = w;
              data->real_url = tr_strdup (url);
              data->piece_index = task->piece_index;
              data->piece_offset = task->piece_offset + (task->blocks_done * task->block_size) + (len - 1);

              /* processing this uses a tr_torrent pointer,
                 so push the work to the libevent thread... */
              tr_runInEventThread (w->session, connection_succeeded, data);
            }
        }

      /* ...remainder of the handler omitted in this excerpt... */
    }

  tr_sessionUnlock (session);
}
Example #2
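/* drains io->inbuf by repeatedly invoking the io's canRead callback,
   crediting piece data and protocol overhead to the peer's bandwidth,
   until the callback reports it is done or hits an error */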
static void
canReadWrapper( tr_peerIo * io )
{
    tr_bool err = 0;
    tr_bool done = 0;
    tr_session * session;

    dbgmsg( io, "canRead" );

    assert( tr_isPeerIo( io ) );
    assert( tr_isSession( io->session ) );
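    /* hold a reference so the io can't be destroyed while we're using it */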
    tr_peerIoRef( io );

    session = io->session;

    /* try to consume the input buffer */
    if( io->canRead )
    {
        tr_sessionLock( session );

        while( !done && !err )
        {
            size_t piece = 0;
            const size_t oldLen = EVBUFFER_LENGTH( io->inbuf );
            const int ret = io->canRead( io, io->userData, &piece );

            /* how many bytes the callback drained from the input buffer */
            const size_t used = oldLen - EVBUFFER_LENGTH( io->inbuf );

            assert( tr_isPeerIo( io ) );

            /* charge the consumed bytes to the bandwidth manager:
               "piece" bytes as piece data, the remainder as protocol overhead */
            if( piece || (piece!=used) )
            {
                const uint64_t now = tr_time_msec( );

                if( piece )
                    tr_bandwidthUsed( &io->bandwidth, TR_DOWN, piece, TRUE, now );

                if( used != piece )
                    tr_bandwidthUsed( &io->bandwidth, TR_DOWN, used - piece, FALSE, now );
            }

            switch( ret )
            {
                case READ_NOW:
                    /* the callback wants more; loop again while data remains */
                    if( EVBUFFER_LENGTH( io->inbuf ) )
                        continue;
                    done = 1;
                    break;

                case READ_LATER:
                    done = 1;
                    break;

                case READ_ERR:
                    err = 1;
                    break;
            }

            assert( tr_isPeerIo( io ) );
        }

        tr_sessionUnlock( session );
    }

    /* keep the iobuf's excess capacity from growing too large */
    if( EVBUFFER_LENGTH( io->inbuf ) == 0 ) {
        evbuffer_free( io->inbuf );
        io->inbuf = evbuffer_new( );
    }

    assert( tr_isPeerIo( io ) );
    tr_peerIoUnref( io );
}