Example 1
static int pound_it(int concurrency,
                    const char* type,
                    setup_fn do_setup,
                    connect_fn do_connect,
                    make_connect_fn make_connect,
                    void* arg) {
  double secs;
  int r;
  uint64_t start_time; /* in ns */
  uint64_t end_time;

  loop = uv_default_loop();

  uv_update_time(loop);
  start = uv_now(loop);

  /* Run benchmark for at least five seconds. */
  start_time = uv_hrtime();

  do_setup(concurrency, arg);

  r = do_connect(concurrency, make_connect, arg);
  ASSERT(!r);

  uv_run(loop, UV_RUN_DEFAULT);

  end_time = uv_hrtime();

  /* Number of fractional seconds it took to run the benchmark. */
  secs = (double)(end_time - start_time) / NANOSEC;

  LOGF("%s-conn-pound-%d: %.0f accepts/s (%d failed)\n",
       type,
       concurrency,
       closed_streams / secs,
       conns_failed);

  MAKE_VALGRIND_HAPPY();
  return 0;
}
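The snippet above relies on several file-scope variables defined elsewhere in the benchmark. A minimal sketch of what those declarations plausibly look like (the names come from the snippet; the exact types and the NANOSEC value are assumptions based on common libuv test conventions):

#include <stdint.h>
#include <uv.h>

#define NANOSEC ((uint64_t) 1e9)  /* nanoseconds per second, used to convert uv_hrtime() deltas */

static uv_loop_t* loop;           /* set to uv_default_loop() at the top of pound_it() */
static int64_t start;             /* loop time in ms, captured with uv_now(loop) */
static int closed_streams;        /* counted as connections are closed */
static int conns_failed;          /* counted when a connection attempt fails */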
Example 2
/* for libuv */
static void on_uv_timer_cb(uv_timer_t * handle)
{
    sg_etp_session_t * session = handle->data;
    IUINT32 now = 0;

    /*LOG_D("update %d", client->conv);*/

    /* update ikcp */
    now = (IUINT32)uv_now(session->loop);
    sg_etp_update_speed((sg_etp_t *)session, now);
    if (now >= session->kcp_update_time)
    {
        ikcp_update(session->kcp, now);
        session->kcp_update_time = ikcp_check(session->kcp, now);

        LOG_D("update %lu @ %lu, timeout: %lu", session->conv, session->kcp_update_time, session->recv_data_time);

        /* check received data and add to work queue */
        //recv_data_check(session);
        if (ikcp_peeksize(session->kcp) > 0)
        {
            uv_idle_start(&(session->idle), on_uv_idle_cb);
        }
    }

    /* check whether the session has timed out */
    if (session->recv_data_time < now)
    {
        session->to_close = true; /* mark to close this session. */
        ikcp_flush(session->kcp);
        LOG_I("session %lu timeout, will be closed", session->conv);
    }

    /* check whether this session should be closed */
    if (session->to_close)
    {
        sg_etp_session_close(session);
    }
}
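For context, on_uv_timer_cb above is driven by a repeating uv_timer whose data pointer is set to the session. A hedged sketch of how such a timer is typically started (the timer field name and the interval are assumptions, not taken from the project):

static int start_session_timer(sg_etp_session_t * session, uint64_t interval_ms)
{
    int r = uv_timer_init(session->loop, &session->timer);  /* 'timer' field name is assumed */
    if (r != 0)
        return r;
    session->timer.data = session;  /* read back via handle->data in on_uv_timer_cb */
    return uv_timer_start(&session->timer, on_uv_timer_cb, interval_ms, interval_ms);
}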
Example 3
int uv_fs_poll_start(uv_fs_poll_t* handle,
                     uv_fs_poll_cb cb,
                     const char* path,
                     unsigned int interval) {
    struct poll_ctx* ctx;
    uv_loop_t* loop;
    size_t len;

    if (uv__is_active(handle))
        return 0;

    loop = handle->loop;
    len = strlen(path);
    ctx = calloc(1, sizeof(*ctx) + len);

    if (ctx == NULL)
        return UV_ENOMEM;

    ctx->loop = loop;
    ctx->poll_cb = cb;
    ctx->interval = interval ? interval : 1;
    ctx->start_time = uv_now(loop);
    ctx->parent_handle = handle;
    memcpy(ctx->path, path, len + 1);

    if (uv_timer_init(loop, &ctx->timer_handle))
        abort();

    ctx->timer_handle.flags |= UV__HANDLE_INTERNAL;
    uv__handle_unref(&ctx->timer_handle);

    if (uv_fs_stat(loop, &ctx->fs_req, ctx->path, poll_cb))
        abort();

    handle->poll_ctx = ctx;
    uv__handle_start(handle);

    return 0;
}
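The calloc() call above over-allocates by strlen(path) so the path can be stored inline after the structure. A sketch of the context layout this relies on, limited to the fields the snippet touches (the real structure has additional members):

struct poll_ctx {
  uv_fs_poll_t* parent_handle;
  uv_loop_t* loop;
  uv_fs_poll_cb poll_cb;
  uv_timer_t timer_handle;
  uv_fs_t fs_req;
  uint64_t start_time;
  unsigned int interval;
  char path[1];  /* grows past the struct thanks to the extra 'len' bytes from calloc() */
};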
Example 4
static void repeat_2_cb(uv_timer_t* handle, int status) {
  ASSERT(handle == &repeat_2);
  ASSERT(status == 0);
  ASSERT(repeat_2_cb_allowed);

  LOGF("repeat_2_cb called after %ld ms\n", (long int)(uv_now() - start_time));

  repeat_2_cb_called++;

  if (uv_timer_get_repeat(&repeat_2) == 0) {
    ASSERT(!uv_is_active((uv_handle_t*)handle));
    uv_close((uv_handle_t*)handle, close_cb);
    return;
  }

  LOGF("uv_timer_get_repeat %ld ms\n",
      (long int)uv_timer_get_repeat(&repeat_2));
  ASSERT(uv_timer_get_repeat(&repeat_2) == 100);

  /* This shouldn't take effect immediately. */
  uv_timer_set_repeat(&repeat_2, 0);
}
Example 5
static void repeat_1_cb(uv_timer_t* handle) {
  int r;

  ASSERT(handle == &repeat_1);
  ASSERT(uv_timer_get_repeat((uv_timer_t*)handle) == 50);

  LOGF("repeat_1_cb called after %ld ms\n",
      (long int)(uv_now(uv_default_loop()) - start_time));

  repeat_1_cb_called++;

  r = uv_timer_again(&repeat_2);
  ASSERT(r == 0);

  if (repeat_1_cb_called == 10) {
    uv_close((uv_handle_t*)handle, close_cb);
    /* We're not calling uv_timer_again on repeat_2 any more, so after this
     * repeat_2_cb is expected. */
    repeat_2_cb_allowed = 1;
    return;
  }
}
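These repeat-timer callbacks assume that repeat_1 and repeat_2 were started as repeating timers before the loop ran. A hedged sketch of that setup, using the single-argument (libuv >= 1.0) callback signatures shown in Examples 5 and 9; the intervals are chosen to match the asserts above and are otherwise an assumption:

static uv_timer_t repeat_1;
static uv_timer_t repeat_2;

static void start_repeat_timers(uv_loop_t* loop) {
  uv_timer_init(loop, &repeat_1);
  uv_timer_start(&repeat_1, repeat_1_cb, 50, 50);    /* 50 ms repeat, matching the == 50 assert */
  uv_timer_init(loop, &repeat_2);
  uv_timer_start(&repeat_2, repeat_2_cb, 100, 100);  /* 100 ms repeat, matching the == 100 assert */
}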
Example 6
File: module.c Project: 0/julia
JL_DLLEXPORT jl_module_t *jl_new_module(jl_sym_t *name)
{
    jl_module_t *m = (jl_module_t*)jl_gc_allocobj(sizeof(jl_module_t));
    jl_set_typeof(m, jl_module_type);
    JL_GC_PUSH1(&m);
    assert(jl_is_symbol(name));
    m->name = name;
    m->parent = NULL;
    m->istopmod = 0;
    m->uuid = uv_now(uv_default_loop());
    m->counter = 0;
    htable_new(&m->bindings, 0);
    arraylist_new(&m->usings, 0);
    if (jl_core_module) {
        jl_module_using(m, jl_core_module);
    }
    // export own name, so "using Foo" makes "Foo" itself visible
    jl_set_const(m, name, (jl_value_t*)m);
    jl_module_export(m, name);
    JL_GC_POP();
    return m;
}
Example 7
static void pinger_read_cb(uv_stream_t* tcp,
                           ssize_t nread,
                           const uv_buf_t* buf) {
  ssize_t i;
  pinger_t* pinger;

  pinger = (pinger_t*)tcp->data;

  if (nread < 0) {
    ASSERT(nread == UV_EOF);

    if (buf->base) {
      buf_free(buf);
    }

    ASSERT(pinger_shutdown_cb_called == 1);
    uv_close((uv_handle_t*)tcp, pinger_close_cb);

    return;
  }

  /* Now we count the pings */
  for (i = 0; i < nread; i++) {
    ASSERT(buf->base[i] == PING[pinger->state]);
    pinger->state = (pinger->state + 1) % (sizeof(PING) - 1);
    if (pinger->state == 0) {
      pinger->pongs++;
      if (uv_now(loop) - start_time > TIME) {
        uv_shutdown(&pinger->shutdown_req, (uv_stream_t*) tcp, pinger_shutdown_cb);
        break;
      } else {
        pinger_write_ping(pinger);
      }
    }
  }

  buf_free(buf);
}
Example 8
static void pinger_read_cb(uv_tcp_t* tcp, int nread, uv_buf_t buf) {
  unsigned int i;
  pinger_t* pinger;

  pinger = (pinger_t*)tcp->data;

  if (nread < 0) {
    ASSERT(uv_last_error().code == UV_EOF);

    if (buf.base) {
      buf_free(buf);
    }

    ASSERT(pinger_shutdown_cb_called == 1);
    uv_close((uv_handle_t*)tcp);

    return;
  }

  /* Now we count the pings */
  for (i = 0; i < nread; i++) {
    ASSERT(buf.base[i] == PING[pinger->state]);
    pinger->state = (pinger->state + 1) % (sizeof(PING) - 1);
    if (pinger->state == 0) {
      pinger->pongs++;
      if (uv_now() - start_time > TIME) {
        uv_req_init(&pinger->shutdown_req, (uv_handle_t*)tcp, pinger_shutdown_cb);
        uv_shutdown(&pinger->shutdown_req);
        break;
      } else {
        pinger_write_ping(pinger);
      }
    }
  }

  buf_free(buf);
}
Example 9
static void repeat_2_cb(uv_timer_t* handle) {
  ASSERT(handle == &repeat_2);
  ASSERT(repeat_2_cb_allowed);

  fprintf(stderr, "repeat_2_cb called after %ld ms\n",
          (long int)(uv_now(uv_default_loop()) - start_time));
  fflush(stderr);

  repeat_2_cb_called++;

  if (uv_timer_get_repeat(&repeat_2) == 0) {
    ASSERT(0 == uv_is_active((uv_handle_t*) handle));
    uv_close((uv_handle_t*)handle, close_cb);
    return;
  }

  fprintf(stderr, "uv_timer_get_repeat %ld ms\n",
          (long int)uv_timer_get_repeat(&repeat_2));
  fflush(stderr);
  ASSERT(uv_timer_get_repeat(&repeat_2) == 100);

  /* This shouldn't take effect immediately. */
  uv_timer_set_repeat(&repeat_2, 0);
}
Example 10
static int luv_now(lua_State* L)
{
    uint64_t now = uv_now(luv_get_loop(L));
    lua_pushnumber(L, (lua_Number)now);
    return 1;
}
Example 11
int luv_now(lua_State* L) {
  int64_t now = uv_now(uv_default_loop());
  lua_pushinteger(L, now);
  return 1;
}
Example 12
void timer_cb(uv_timer_t* handle, int status) {
  printf("%llu\n", (unsigned long long) uv_now(handle->loop));  /* loop time, ms */
  printf("%llu\n", (unsigned long long) uv_hrtime());           /* monotonic time, ns */
  printf("%ld\n", (long) time(NULL));                           /* wall clock, s */
//  printf("%lld\n", get_time_ms());
}
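The three printf calls above compare different clocks. A small self-contained sketch (not from the original project) making the units explicit: uv_now() returns the event loop's cached time in milliseconds, while uv_hrtime() returns a high-resolution monotonic time in nanoseconds:

#include <stdio.h>
#include <time.h>
#include <uv.h>

int main(void) {
  uv_loop_t* loop = uv_default_loop();
  uv_update_time(loop);  /* refresh the cached loop time before reading it */
  printf("uv_now:    %llu ms\n", (unsigned long long) uv_now(loop));
  printf("uv_hrtime: %llu ns\n", (unsigned long long) uv_hrtime());
  printf("time:      %ld s\n", (long) time(NULL));
  uv_loop_close(loop);
  return 0;
}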
Example 13
 uint64_t now() { return uv_now(m_uvloop.get()); }
Example 14
uint32_t sg_etp_now(sg_etp_t * client)
{
    return (uint32_t)uv_now(client->loop);
}
Example 15
File: client.c Project: torque/reki
static void Client_route( ClientConnection *client ) {
	#define OkayRoute "HTTP/1.0 200 OK\r\nContent-Type: text/plain\r\nConnection: close\r\nContent-Length:"
	#define InvalidRoute "HTTP/1.0 403 Forbidden\r\nContent-Type: text/plain\r\nConnection: close\r\nContent-Length:12\r\n\r\nGET WRECKED\n"

	HttpParserInfo *parserInfo = client->parserInfo;

	char *path, *query;
	size_t pathSize, querySize;
	HttpParser_parseURL( parserInfo, &path, &pathSize, &query, &querySize );

	dbg_info( "Requested path: %.*s", (int)pathSize, path );
	dbg_info( "Request query: %.*s", (int)querySize, query );
	if ( EqualLiteralLength( path, pathSize, "/announce" ) ) {
		StringBuffer_append( client->writeBuffer, OkayRoute, strlen( OkayRoute ) );
		ClientAnnounceData *announce = ClientAnnounceData_new( );
		Client_CheckAllocReplyError( client, announce );

		announce->score = uv_now( client->handle.stream->loop );
		client->requestType = ClientRequest_announce;
		client->request.announce = announce;
		if ( ClientAnnounceData_fromQuery( announce, query, querySize ) ) {
			log_warn( "%s", announce->errorMessage );
			Client_replyErrorLen( client, announce->errorMessage );
			return;
		}

		dbg_info( "There was no error parsing the announce." );
		if ( announce->event == AnnounceEvent_stop ) {
			StringBuffer_append( client->writeBuffer, "6\r\n\r\n3:bye\n", 11 );
			Client_reply( client );
			return;
		}

		// check whether an IP address has already been set
		if ( !(announce->compact[0] & CompactAddress_IPv4Flag) && !(announce->compact[0] & CompactAddress_IPv6Flag) ) {
			// not in the query: try the X-Real-IP header, then fall back to the socket peer address.
			char *xRealIP = HttpParser_realIP( client->parserInfo );
			if ( xRealIP ) {
				CompactAddress_fromString( announce->compact, xRealIP, NULL );
				free( xRealIP );
			} else {
				struct sockaddr_storage sock;
				int len = sizeof(sock);
				int e = uv_tcp_getpeername( client->handle.tcpHandle, (struct sockaddr*)&sock, &len );
				if ( e ) {
					Client_replyErrorLen( client, "IP could not be determined." );
					return;
				}

				CompactAddress_fromSocket( announce->compact, &sock, false );
			}
		}
		CompactAddress_dump( announce->compact );
		MemoryStore_processAnnounce( client->server->memStore, client );

	} else if ( EqualLiteralLength( path, pathSize, "/scrape" ) ) {
		StringBuffer_append( client->writeBuffer, OkayRoute, strlen( OkayRoute ) );
		ScrapeData *scrape = ScrapeData_new( );
		Client_CheckAllocReplyError( client, scrape );

		client->requestType = ClientRequest_scrape;
		client->request.scrape = scrape;
		if ( ScrapeData_fromQuery( scrape, query, querySize ) ) {
			Client_replyErrorLen( client, "Invalid scrape request." );
			return;
		}

		MemoryStore_processScrape( client->server->memStore, client );

	} else {
		StringBuffer_append( client->writeBuffer, InvalidRoute, strlen( InvalidRoute ) );
		Client_reply( client );
	}
	#undef OkayRoute
	#undef InvalidRoute
}
Example 16
uint64_t Time_currentTimeMilliseconds(struct EventBase* eventBase)
{
    struct EventBase_pvt* base = Identity_cast((struct EventBase_pvt*) eventBase);
    return uv_now(base->loop) + base->baseTime;
}
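Here uv_now() yields loop-relative milliseconds and baseTime is an offset that converts them to wall-clock milliseconds. A hedged sketch of how such an offset could be computed once at startup (this helper is illustrative, not the project's actual initialization code):

#include <stdint.h>
#include <sys/time.h>
#include <uv.h>

static uint64_t compute_base_time(uv_loop_t* loop)
{
    struct timeval tv;
    gettimeofday(&tv, NULL);
    uint64_t wallMs = (uint64_t) tv.tv_sec * 1000 + (uint64_t) tv.tv_usec / 1000;
    uv_update_time(loop);
    return wallMs - uv_now(loop);  /* added back later: uv_now(loop) + baseTime ~= wall clock in ms */
}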
Example 17
 ///!
 ///  ...
 ///  Internally, this function just calls the uv_now() function.
 ///
 int64_t
 now() {
     return uv_now(uv_loop_);
 }
Example 18
int luv_now(lua_State* L) {
  double now = (double)uv_now(luv_get_loop(L));
  lua_pushnumber(L, now);
  return 1;
}
Example 19
static void writer(SLNPullRef const pull) {
	SLNSubmissionRef queue[QUEUE_SIZE];
	size_t count = 0;
	size_t skipped = 0;
	double time = uv_now(async_loop) / 1000.0;
	for(;;) {
		if(pull->stop) goto stop;

		async_mutex_lock(pull->mutex);
		while(0 == count || (count < QUEUE_SIZE && pull->count > 0)) {
			size_t const pos = pull->cur;
			while(!pull->filled[pos]) {
				async_cond_wait(pull->cond, pull->mutex);
				if(pull->stop) {
					async_mutex_unlock(pull->mutex);
					goto stop;
				}
				if(!count) time = uv_now(async_loop) / 1000.0;
			}
			assert(pull->filled[pos]);
			// Skip any bubbles in the queue.
			if(pull->queue[pos]) queue[count++] = pull->queue[pos];
			else skipped++;
			pull->queue[pos] = NULL;
			pull->filled[pos] = false;
			pull->cur = (pull->cur + 1) % QUEUE_SIZE;
			pull->count--;
			async_cond_broadcast(pull->cond);
		}
		async_mutex_unlock(pull->mutex);
		assert(count <= QUEUE_SIZE);

		for(;;) {
			int rc = SLNSubmissionStoreBatch(queue, count);
			if(rc >= 0) break;
			alogf("Submission error: %s (%d)\n", sln_strerror(rc), rc);
			async_sleep(1000 * 5);
		}
		for(size_t i = 0; i < count; ++i) {
			SLNSubmissionFree(&queue[i]);
		}

		double const now = uv_now(async_loop) / 1000.0;
		alogf("Pulled %f files per second\n", count / (now - time));
		time = now;
		count = 0;
		skipped = 0;

	}

stop:
	for(size_t i = 0; i < count; ++i) {
		SLNSubmissionFree(&queue[i]);
	}
	assert_zeroed(queue, count);

	async_mutex_lock(pull->mutex);
	assertf(pull->stop, "Writer ended early");
	assert(pull->tasks > 0);
	pull->tasks--;
	async_cond_broadcast(pull->cond);
	async_mutex_unlock(pull->mutex);
}
Example 20
int luv_now(lua_State* L) {
  int64_t now = uv_now(luv_get_loop(L));
  lua_pushinteger(L, now);
  return 1;
}
Example 21
File: init.c Project: AsamQi/LuaIO
int LuaIO_init(lua_State *L, int argc, char* argv[]) {
  /*config.h*/
  LuaIO_platform_init();
  LuaIO_date_init(); 
  LuaIO_pmemory_init(LUAIO_PMEMORY_MAX_FREE_CHUNKS);
  LuaIO_timer_init(LUAIO_TIMER_MAX_FREE_TIMERS);
  LuaIO_dns_init(L);
  LuaIO_tcp_connect_req_pool_init(LUAIO_TCP_CONNECT_REQ_POOL_MAX_FREE_CHUNKS);
  LuaIO_tcp_write_req_pool_init(LUAIO_TCP_WRITE_REQ_POOL_MAX_FREE_CHUNKS);
  LuaIO_tcp_shutdown_req_pool_init(LUAIO_TCP_SHUTDOWN_REQ_POOL_MAX_FREE_CHUNKS);
  LuaIO_fs_req_pool_init(LUAIO_FS_REQ_POOL_MAX_FREE_CHUNKS);

  LuaIO_start_time = uv_now(uv_default_loop());
  LuaIO_main_thread = L;

  /*preload*/
  lua_getglobal(L, "package");
  lua_getfield(L, -1, "preload");
  lua_remove(L, -2);

  /*errno*/
  lua_pushcfunction(L, luaopen_errno);
  lua_setfield(L, -2, "errno");
 
  /*system*/
  lua_pushcfunction(L, luaopen_system);
  lua_setfield(L, -2, "system");
 
  /*process*/
  lua_pushcfunction(L, luaopen_process);
  lua_setfield(L, -2, "process");
  
  /*date*/
  lua_pushcfunction(L, luaopen_date);
  lua_setfield(L, -2, "date");
  
  /*read_buffer*/
  lua_pushcfunction(L, luaopen_read_buffer);
  lua_setfield(L, -2, "read_buffer");
  
  /*write_buffer*/
  lua_pushcfunction(L, luaopen_write_buffer);
  lua_setfield(L, -2, "write_buffer");
  
  /*dns*/
  lua_pushcfunction(L, luaopen_dns);
  lua_setfield(L, -2, "dns");
  
  /*tcp_native*/
  lua_pushcfunction(L, luaopen_tcp);
  lua_setfield(L, -2, "tcp_native");
  
  /*fs_native*/
  lua_pushcfunction(L, luaopen_fs);
  lua_setfield(L, -2, "fs_native");
  
  /*http_parser*/
  /*lua_pushcfunction(L, luaopen_http_parser);*/
  /*lua_setfield(L, -2, "http_parser");*/

  lua_pop(L, 1);

  /*argv*/
  lua_createtable (L, argc, 0);
  for (int i = 0; i < argc; i++) {
    lua_pushstring (L, argv[i]);
    lua_rawseti(L, -2, i + 1);
  }
  lua_setglobal(L, "argv");

  /*sleep(delay)*/
  lua_pushcfunction(L, LuaIO_sleep);
  lua_setglobal(L, "sleep");

  return 0;
}
Example 22
static void SLNFilterResultsWrite(SLNSessionRef const session, SLNFilterRef const filter, SLNFilterOpts *const opts, HTTPConnectionRef const conn) {
	// TODO: Accept count and use it for the total number of results.
	opts->count = 0;

	// We're sending a series of batches, so reversing one batch
	// doesn't make sense.
	opts->outdir = opts->dir;

	static strarg_t const fields[] = { "wait" };
	str_t *values[numberof(fields)] = {};
	QSValuesParse(qs, values, fields, numberof(fields));
	bool const wait = parse_wait(values[0]);
	QSValuesCleanup(values, numberof(values));

	// I'm aware that we're abusing HTTP for sending real-time push data.
	// I'd also like to support WebSocket at some point, but this is simpler
	// and frankly probably more widely supported.
	// Note that the protocol doesn't really break even if this data is
	// cached. It DOES break if a proxy tries to buffer the whole response
	// before passing it back to the client. I'd be curious to know whether
	// such proxies still exist in 2015.
	HTTPConnectionWriteResponse(conn, 200, "OK");
	HTTPConnectionWriteHeader(conn, "Transfer-Encoding", "chunked");
	HTTPConnectionWriteHeader(conn,
		"Content-Type", "text/uri-list; charset=utf-8");
	HTTPConnectionWriteHeader(conn, "Cache-Control", "no-store");
	HTTPConnectionWriteHeader(conn, "Vary", "*");
	HTTPConnectionBeginBody(conn);
	int rc;

	for(;;) {
		rc = sendURIBatch(session, filter, opts, conn);
		if(DB_NOTFOUND == rc) break;
		if(DB_SUCCESS == rc) continue;
		fprintf(stderr, "Query error: %s\n", db_strerror(rc));
		goto cleanup;
	}

	if(!wait || opts->dir < 0) goto cleanup;

	SLNRepoRef const repo = SLNSessionGetRepo(session);
	for(;;) {
		uint64_t const timeout = uv_now(async_loop)+(1000 * 30);
		rc = SLNRepoSubmissionWait(repo, opts->sortID, timeout);
		if(UV_ETIMEDOUT == rc) {
			uv_buf_t const parts[] = { uv_buf_init((char *)STR_LEN("\r\n")) };
			rc = HTTPConnectionWriteChunkv(conn, parts, numberof(parts));
			if(rc < 0) break;
			continue;
		}
		assert(rc >= 0); // TODO: Handle cancellation?

		for(;;) {
			rc = sendURIBatch(session, filter, opts, conn);
			if(DB_NOTFOUND == rc) break;
			if(DB_SUCCESS == rc) continue;
			fprintf(stderr, "Query error: %s\n", db_strerror(rc));
			goto cleanup;
		}
	}

cleanup:
	HTTPConnectionWriteChunkEnd(conn);
	HTTPConnectionEnd(conn);
	SLNFilterOptsCleanup(opts);
}
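The wait loop above uses uv_now() to build an absolute deadline (loop time plus 30 seconds) and retries until the repository wait returns or the deadline passes. A generic sketch of that deadline idiom (the helper names are illustrative, not project code):

#include <stdint.h>
#include <uv.h>

/* Absolute deadline, expressed in the loop's millisecond clock. */
static uint64_t deadline_in(uv_loop_t* loop, uint64_t ms_from_now) {
	return uv_now(loop) + ms_from_now;  /* e.g. uv_now(async_loop) + 1000 * 30 above */
}

static int deadline_passed(uv_loop_t* loop, uint64_t deadline) {
	return uv_now(loop) >= deadline;
}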