Example #1
void on_nble_gap_connect_evt(const struct nble_gap_connect_evt *ev)
{
	struct bt_conn *conn;

	BT_DBG("handle %u role %u", ev->conn_handle, ev->role_slave);

	conn = conn_get(&ev->peer_bda);
	if (!conn) {
		BT_ERR("Unable to get bt_conn object");
		return;
	}

	conn->handle = ev->conn_handle;
	conn->role = ev->role_slave ? BT_CONN_ROLE_SLAVE : BT_CONN_ROLE_MASTER;
	conn->interval = ev->conn_values.interval;
	conn->latency = ev->conn_values.latency;
	conn->timeout = ev->conn_values.supervision_to;
	bt_addr_le_copy(&conn->dst, &ev->peer_bda);
	k_delayed_work_init(&conn->update_work, le_conn_update);

	conn->state = BT_CONN_CONNECTED;

	notify_connected(conn);

	/*
	 * Core 4.2 Vol 3, Part C, 9.3.12.2
	 * The Peripheral device should not perform a Connection Parameter
	 * Update procedure within 5 s after establishing a connection.
	 */
	k_delayed_work_submit(&conn->update_work,
			      conn->role == BT_CONN_ROLE_MASTER ? K_NO_WAIT :
			      CONN_UPDATE_TIMEOUT);
}
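
The 5 s rule cited in the comment implies a timeout constant along these lines. A minimal sketch, assuming Zephyr's K_SECONDS() helper; the actual definition lives elsewhere in the tree:

/* Hypothetical sketch: defer the slave's first connection parameter update
 * by 5 s, per Core 4.2 Vol 3, Part C, 9.3.12.2. */
#define CONN_UPDATE_TIMEOUT K_SECONDS(5)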
Example #2
/* send, then receive, one message with the given rank */
static int
send_recv_msg(struct message *msg, int rank)
{
  int ret = -1, i = 2;
  char port[HOST_NAME_MAX];
  struct sockconn conn;
  memset(&conn, 0, sizeof(conn));
  BUG(!msg);
  BUG(rank > node_file_entries - 1);
  snprintf(port, HOST_NAME_MAX, "%d", node_file[rank].ocm_port);
  if (conn_connect(&conn, node_file[rank].ip_eth, port))
    goto out;
  while (i-- > 0) {
    switch (i) {
      case 1: ret = conn_put(&conn, msg, sizeof(*msg)); break;
      case 0: ret = conn_get(&conn, msg, sizeof(*msg)); break;
    }
    if (--ret < 0) /* 0 means the remote closed; the decrement folds it into the error case */
      break;
  }
  if (ret < 0 || (ret = conn_close(&conn)))
    goto out;
  ret = 0;
out:
  /* TODO close connection on error */
  return ret;
}
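
A minimal caller sketch for the helper above, assuming the message type field and MSG_REQ_ALLOC constant used by inbound_thread in Example #14 (both are assumptions here); on success the reply overwrites msg:

/* Hypothetical usage: round-trip an allocation request to rank 0. */
struct message msg;
memset(&msg, 0, sizeof(msg));
msg.type = MSG_REQ_ALLOC;
if (send_recv_msg(&msg, 0)) /* nonzero means connect/put/get/close failed */
    fprintf(stderr, "rpc to rank 0 failed\n");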
Example #3
void accept_cb(struct wx_worker_s* wk) {
    struct conn_s* conn = conn_get();
    if (conn ==  NULL) {
        wx_err("no more free connction");
        return;
    }

    int cfd = wx_accept(wk->listen_fd, NULL, 0);
    if (cfd < 0) {
        conn_put(conn);
        return;
    }

    int p = fcntl(cfd, F_GETFL);
    if (-1 == p || -1 == fcntl(cfd, F_SETFL, p|O_NONBLOCK)) {
        wx_err("fcntl");
        close(cfd); /* don't leak the accepted fd */
        conn_put(conn);
        return;
    }

    int one = 1;
    setsockopt(cfd, SOL_TCP, TCP_NODELAY, &one, sizeof(one));

    /* Carve a wx_buf_s header out of the front of conn->data; the usable
     * buffer starts at the header's data member and spans what remains. */
    conn->buf = (struct wx_buf_s*)conn->data;
    conn->buf->base = conn->buf->data;
    conn->buf->size = sizeof(conn->data) - sizeof(struct wx_buf_s);
    wx_conn_read_start(&conn->wx_conn, cfd);
}
Example #4
struct conn *
server_conn(struct server *server)
{
	struct server_pool *pool;
	struct conn *conn;

	pool = server->owner;

	/*
	 * FIXME: handle multiple server connections per server and do load
	 * balancing on it. Support multiple algorithms for
	 * 'server_connections:' > 0 key
	 */

	if (server->ns_conn_q < pool->server_connections) {
		return conn_get(server, false, pool->redis);
	}
	ASSERT(server->ns_conn_q == pool->server_connections);

	/*
	 * Pick a server connection from the head of the queue and insert
	 * it back into the tail of queue to maintain the lru order
	 */
	conn = TAILQ_FIRST(&server->s_conn_q);
	ASSERT(!conn->client && !conn->proxy);

	TAILQ_REMOVE(&server->s_conn_q, conn, conn_tqe);
	TAILQ_INSERT_TAIL(&server->s_conn_q, conn, conn_tqe);

	return conn;
}
Example #5
int user_arp_hijack(struct user_conn_info *uci, char *src_fake_mac,
		    char *dst_fake_mac, int input_mode)
{
	struct conn_info *ci;
	int retval;
	
	if (!(ci = conn_get(uci))) {
		printf("connection isn't available\n");
		retval = 1;
	} else {
		retval = arp_hijack(ci, src_fake_mac, dst_fake_mac, input_mode);
		conn_free(ci);
	}
	return retval;
}
Example #6
static int isert_process_all_writes(struct iscsi_conn *conn)
{
	struct iscsi_cmnd *cmnd;
	int res = 0;

	TRACE_ENTRY();

	while ((cmnd = iscsi_get_send_cmnd(conn)) != NULL) {
		isert_update_len_sn(cmnd);
		conn_get(conn);
		isert_pdu_tx(cmnd);
	}

	TRACE_EXIT_RES(res);
	return res;
}
Example #7
struct bt_conn *bt_conn_create_le(const bt_addr_le_t *peer,
				  const struct bt_le_conn_param *param)
{
	struct nble_gap_connect_req req;
	struct bt_conn *conn;

	BT_DBG("");

	if (!bt_le_conn_params_valid(param->interval_min, param->interval_max,
				     param->latency, param->timeout)) {
		return NULL;
	}

	conn = conn_get(peer);
	if (!conn) {
		BT_ERR("Unable to get bt_conn object");
		return NULL;
	}

	/* Update connection parameters */
	bt_addr_le_copy(&conn->dst, peer);
	conn->latency = param->latency;
	conn->timeout = param->timeout;

	memset(&req, 0, sizeof(req));

	/* Construct parameters to NBLE */
	bt_addr_le_copy(&req.bda, peer);

	req.conn_params.interval_min = param->interval_min;
	req.conn_params.interval_max = param->interval_max;
	req.conn_params.slave_latency = param->latency;
	req.conn_params.link_sup_to = param->timeout;

	req.scan_params.interval = BT_GAP_SCAN_FAST_INTERVAL;
	req.scan_params.window = BT_GAP_SCAN_FAST_WINDOW;

	conn->state = BT_CONN_CONNECT;

	nble_gap_connect_req(&req, conn);

	return conn;
}
Example #8
static int
make_conn(struct context *ctx, void *arg)
{
    rstatus_t status;
    struct conn *conn;

    ASSERT(!make_conn_done(ctx));

    conn = conn_get(ctx);
    if (conn == NULL) {
        ctx->nconn_create_failed++;
        goto done;
    }

    status = core_connect(ctx, conn);
    if (status != MCP_OK) {
        ctx->nconn_create_failed++;
        ecb_signal(ctx, EVENT_CONN_FAILED, conn);
        goto done;
    }

    ctx->nconn_created++;
    ecb_signal(ctx, EVENT_CONN_CREATED, conn);

done:
    if (make_conn_done(ctx)) {
        log_debug(LOG_NOTICE, "created %"PRIu32" %"PRIu32" of %"PRIu32" "
                  "connections", ctx->nconn_create_failed, ctx->nconn_created,
                  ctx->opt.num_conns);
        if (ctx->nconn_destroyed == ctx->nconn_created) {
            core_stop(ctx);
        }
        return -1;
    }

    log_debug(LOG_INFO, "created %"PRIu32" %"PRIu32" of %"PRIu32" "
              "connections", ctx->nconn_create_failed, ctx->nconn_created,
              ctx->opt.num_conns);

    return 0;
}
Example #9
static void isert_send_data_rsp(struct iscsi_cmnd *req, u8 *sense,
				int sense_len, u8 status, int is_send_status)
{
	struct iscsi_cmnd *rsp;

	TRACE_ENTRY();

	sBUG_ON(!is_send_status);

	rsp = create_status_rsp(req, status, sense, sense_len);

	isert_update_len_sn(rsp);

	conn_get(rsp->conn);
	if (status != SAM_STAT_CHECK_CONDITION)
		isert_send_data_in(req, rsp);
	else
		isert_pdu_tx(rsp);

	TRACE_EXIT();
}
Example #10
/* Run a transition of the FSM */
int fsm_step(url_t* url, pfd_t* pfd)
{
    int rc1 = 0, rc2 = 0;
    char buffer[BUFSIZ];        /* To pass in to readConn */

    conn_t* conn = conn_get(pfd_getfd(pfd));

    if (pfd->revents & POLLOUT)
        rc1 = writeConn(url, conn);

    if (pfd->revents & POLLIN)
        rc2 = readConn(conn, buffer, sizeof(buffer));

    if (verbose > 2)
        fprintf(stderr, "Stepped conn #%d -> %s\n", conn->index,
                state_names[conn->state]);

    if (rc2 == ST_OK && out_file != 0)
        fprintf(out_file, "%s %s\n", conn->last_etag, conn->last_path);

    if (rc2 == ST_OK || rc2 == ST_DUP) {
        conn_reset(conn, (struct sockaddr*) &url->addr);
    }

    if (rc1 == ST_ERROR || rc2 == ST_ERROR) {
        if (verbose) {
            char buf[100];
            snprintf(buf, sizeof(buf),
                     "FSM error {r:%s w:%s}", rc_names[rc2], rc_names[rc1]);
            error(conn, buf);
        }
        conn_reset(conn, (struct sockaddr*) &url->addr);
    }

    if (rc1 != ST_OK && rc1 != ST_CONTINUE)
        return rc1;
    return rc2;
}
Example #11
/*
 * Dispatches a new connection to another thread. This is only ever called
 * from the main thread, either during initialization (for UDP) or because
 * of an incoming connection.
 */
rstatus_t
thread_dispatch(int sd, conn_state_t state, int ev_flags, int udp)
{
    int tid;
    struct thread_worker *t;
    ssize_t n;
    struct conn *c;
    int rsize;

    rsize = udp ? UDP_BUFFER_SIZE : TCP_BUFFER_SIZE;

    c = conn_get(sd, state, ev_flags, rsize, udp);
    if (c == NULL) {
        return MC_ENOMEM;
    }

    mc_resolve_peer(c->sd, c->peer, sizeof(c->peer));

    tid = (last_thread + 1) % settings.num_workers;
    t = threads + tid;
    last_thread = tid;

    conn_cq_push(&t->new_cq, c);

    n = write(t->notify_send_fd, "", 1);
    if (n != 1) {
        log_warn("write to notify pipe %d failed: %s", t->notify_send_fd,
                 strerror(errno));
        return MC_ERROR;
    }

    if (state == CONN_NEW_CMD) {
        log_debug(LOG_NOTICE, "accepted c %d from '%s' on tid %d", c->sd,
                  c->peer, tid);
    }

    return MC_OK;
}
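
The worker side of this handoff is not shown. A rough sketch of what it might look like, assuming a conn_cq_pop() counterpart to conn_cq_push() and a per-worker notify_recv_fd paired with notify_send_fd (all names beyond conn_cq_push are assumptions):

/* Hypothetical worker-side sketch: wake on the notify pipe, then drain the
 * connections the dispatcher pushed onto this worker's new_cq. */
static void
worker_notify(struct thread_worker *t)
{
    char tick;
    struct conn *c;

    if (read(t->notify_recv_fd, &tick, 1) != 1) {
        return; /* spurious wakeup; wait for the next byte */
    }

    while ((c = conn_cq_pop(&t->new_cq)) != NULL) {
        /* register c with this worker's event loop and start reading */
    }
}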
Example #12
static int
batch_deliver(struct cuda_rpc *rpc, struct cuda_packet *return_pkt)
{
	int exit_errno;
	struct cuda_pkt_batch *batch = &rpc->batch;
	size_t payload_len = 0UL;
    struct flush *f;
    struct timer t;

	printd(DBG_INFO, "pkts = %lu size = %lu\n",
			batch->header.num_pkts, batch->header.bytes_used);

    timer_init(CLOCK_REALTIME, &t);

    f = &flushes[num_flushes];
    clock_gettime(CLOCK_REALTIME, &f->ts);
    f->bytes = sizeof(batch->header);
    f->blocking = false;
	FAIL_ON_CONN_ERR( conn_put(&rpc->sockconn, &batch->header, sizeof(batch->header)) );

#if defined(NIC_SDP)
    f = &flushes[++num_flushes];
    clock_gettime(CLOCK_REALTIME, &f->ts);
    f->bytes = batch->header.bytes_used + ZCPY_TRIGGER_SZ;
    timer_start(&t); // ignored if batch is non-blocking
	FAIL_ON_CONN_ERR( conn_put(&rpc->sockconn, batch->buffer, batch->header.bytes_used + ZCPY_TRIGGER_SZ) );
#else
	FAIL_ON_CONN_ERR( conn_put(&rpc->sockconn, batch->buffer, batch->header.bytes_used) );
#endif

#ifndef NO_PIPELINING
    if (last_pkt(rpc)->is_sync) { /* only expect a return packet if last is sync */
#endif

#if defined(NIC_SDP)
        f->blocking = true;
        f->bytes += sizeof(*return_pkt) + ZCPY_TRIGGER_SZ;
	    FAIL_ON_CONN_ERR( conn_get(&rpc->sockconn, return_pkt, sizeof(*return_pkt) + ZCPY_TRIGGER_SZ) );
#else
	    FAIL_ON_CONN_ERR( conn_get(&rpc->sockconn, return_pkt, sizeof(*return_pkt)) );
#endif

	    payload_len = return_pkt->len - sizeof(*return_pkt);
	    if (payload_len > 0) {
#if defined(NIC_SDP)
            f->bytes += payload_len + ZCPY_TRIGGER_SZ;
            f->has_ret_payload = true;
		    FAIL_ON_CONN_ERR( conn_get(&rpc->sockconn, (return_pkt + 1), payload_len + ZCPY_TRIGGER_SZ) );
#else
		    FAIL_ON_CONN_ERR( conn_get(&rpc->sockconn, (return_pkt + 1), payload_len) );
#endif
	    }
#ifndef NO_PIPELINING
        f->lat = timer_end(&t, MICROSECONDS);
        f->exec = return_pkt->execlat;
    }
#endif
    ++num_flushes;
	batch_clear(batch);
	return 0;
fail:
	return exit_errno;
}
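
FAIL_ON_CONN_ERR is not defined in this snippet. A plausible sketch consistent with how it is used above, where conn_put()/conn_get() return a non-positive value on error or when the remote side closes (the errno value chosen here is an assumption):

/* Hypothetical sketch: treat a failed or truncated transfer as a fatal
 * connection error and bail out through the fail label. */
#define FAIL_ON_CONN_ERR(expr)        \
    do {                              \
        if ((expr) <= 0) {            \
            exit_errno = -ENETDOWN;   \
            goto fail;                \
        }                             \
    } while (0)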
Example #13
/* After accepting a client connection, accept() returns a new fd; create a
 * new conn for that fd to read its data. */
static rstatus_t
proxy_accept(struct context *ctx, struct conn *p) /* p is the proxy (listening) conn */
{
    rstatus_t status;
    struct conn *c;
    int sd;
    struct server_pool *pool = p->owner;

    ASSERT(p->proxy && !p->client);
    ASSERT(p->sd > 0);
    ASSERT(p->recv_active && p->recv_ready);

    for (;;) {
        sd = accept(p->sd, NULL, NULL); /* accept a new client connection, yielding a new fd */
        if (sd < 0) {
            if (errno == EINTR) {
                log_debug(LOG_VERB, "accept on p %d not ready - eintr", p->sd);
                continue;
            }

            if (errno == EAGAIN || errno == EWOULDBLOCK || errno == ECONNABORTED) {
                log_debug(LOG_VERB, "accept on p %d not ready - eagain", p->sd);
                p->recv_ready = 0;
                return NC_OK;
            }

            /*
             * Workaround for https://github.com/twitter/twemproxy/issues/97
             *
             * We should never reach here, because the check of
             * conn_ncurr_cconn() against ctx->max_ncconn should catch this
             * earlier in the cycle. If we do reach here, ignore
             * EMFILE/ENFILE; returning NC_OK lets the server continue to run
             * instead of closing the server socket.
             *
             * The right solution, however, is to mask out the IN event on
             * the proxy on EMFILE/ENFILE, and mask it back in when an
             * existing connection gets closed.
             */
            if (errno == EMFILE || errno == ENFILE) {
                log_debug(LOG_CRIT, "accept on p %d with max fds %"PRIu32" "
                          "used connections %"PRIu32" max client connections %"PRIu32" "
                          "curr client connections %"PRIu32" failed: %s",
                          p->sd, ctx->max_nfd, conn_ncurr_conn(),
                          ctx->max_ncconn, conn_ncurr_cconn(), strerror(errno));

                p->recv_ready = 0;

                return NC_OK;
            }

            log_error("accept on p %d failed: %s", p->sd, strerror(errno));

            return NC_ERROR;
        }

        break;
    }

    if (conn_ncurr_cconn() >= ctx->max_ncconn) {
        log_debug(LOG_CRIT, "client connections %"PRIu32" exceed limit %"PRIu32,
                  conn_ncurr_cconn(), ctx->max_ncconn);
        status = close(sd);
        if (status < 0) {
            log_error("close c %d failed, ignored: %s", sd, strerror(errno));
        }
        return NC_OK;
    }

    c = conn_get(p->owner, true, p->redis);
    if (c == NULL) {
        log_error("get conn for c %d from p %d failed: %s", sd, p->sd,
                  strerror(errno));
        status = close(sd);
        if (status < 0) {
            log_error("close c %d failed, ignored: %s", sd, strerror(errno));
        }
        return NC_ENOMEM;
    }
    c->sd = sd;

    stats_pool_incr(ctx, c->owner, client_connections);

    status = nc_set_nonblocking(c->sd);
    if (status < 0) {
        log_error("set nonblock on c %d from p %d failed: %s", c->sd, p->sd,
                  strerror(errno));
        c->close(ctx, c);
        return status;
    }

    if (pool->tcpkeepalive) {
        status = nc_set_tcpkeepalive(c->sd);
        if (status < 0) {
            log_warn("set tcpkeepalive on c %d from p %d failed, ignored: %s",
                     c->sd, p->sd, strerror(errno));
        }
    }

    if (p->family == AF_INET || p->family == AF_INET6) {
        status = nc_set_tcpnodelay(c->sd);
        if (status < 0) {
            log_warn("set tcpnodelay on c %d from p %d failed, ignored: %s",
                     c->sd, p->sd, strerror(errno));
        }
    }

    status = event_add_conn(ctx->evb, c);
    if (status < 0) {
        log_error("event add conn from p %d failed: %s", p->sd,
                  strerror(errno));
        c->close(ctx, c);
        return status;
    }

    log_debug(LOG_INFO, "accepted c %d on p %d from '%s'", c->sd, p->sd,
              nc_unresolve_peer_desc(c->sd));

    return NC_OK;
}
Example #14
/* <-- process requests from other daemons */
static void *
inbound_thread(void *arg)
{
  struct sockconn *conn = (struct sockconn*)arg;
  struct message msg;
  int ret = 0;
  BUG(!conn);
  printd("spawned\n");
  while (true) {
    ret = conn_get(conn, &msg, sizeof(msg));
    if (ret < 1)
      break;
    printd("got msg %s\n", MSG_TYPE2STR(msg.type));
    if (msg.type == MSG_ADD_NODE) {
      alloc_add_node(msg.rank, &msg.u.node.config);
    } else if (msg.type == MSG_REQ_ALLOC) {
      //Currently only rank 0 can handle initial allocation request
      //messages, to determine the rank of the node that will fulfill
      //the allocation
      BUG(myrank != 0);
      msg_recv_req_alloc(&msg);
      ret = conn_put(conn, &msg, sizeof(msg));
      if (--ret < 0)
        break;
    } else if (msg.type == MSG_DO_ALLOC) {

      //As remote allocations are created, assign them an identifying ID
      printd("Remote allocation has local ID of %lu\n", rem_alloc_id);
      msg.u.alloc.rem_alloc_id = rem_alloc_id;
      //Increment the ID for each allocation
      rem_alloc_id++;

#ifdef INFINIBAND
      /* First, send msg back to orig rank to unblock app, so it can
       * initiate connection to us. Then listen for connections.
       * XXX possible race condition
       */
      msg.u.alloc.u.rdma.port = ib_port;
      ib_port += 1;

      ret = conn_put(conn, &msg, sizeof(msg));
      if (--ret < 0)
        break;
      msg_recv_do_alloc(&msg); /* blocks */
#endif
#ifdef EXTOLL
      /* EXTOLL server allocations are nonblocking and the call to
       * alloc_ate should return the needed setup parameters for the client
       * in msg.
       */
      msg_recv_do_alloc(&msg); /* should not block for EXTOLL setup */
      ret = conn_put(conn, &msg, sizeof(msg));

#endif
    } else if (msg.type == MSG_DO_FREE) {
      printd("InboundThread received free request for allocation \n");
      //Free the remote allocation
      msg_recv_do_free(&msg);
      ret = conn_put(conn, &msg, sizeof(msg));

    } else if (msg.type == MSG_REQ_FREE) {
      //TODO - should only be received at the root node; releases data
      //structures that hold information about this allocation
      ret = conn_put(conn, &msg, sizeof(msg));
    } else {
      printd("unhandled message %s\n", MSG_TYPE2STR(msg.type));
      BUG(1);
    }
  }
  printd("exiting %s\n", (ret < 0 ? "with error" : "normally"));
  if (ret) BUG(1);
  return NULL;
}
Example #15
static rstatus_t
proxy_accept(struct context *ctx, struct conn *p)
{
    rstatus_t status;
    struct conn *c;
    int sd;

    ASSERT(p->proxy && !p->client);
    ASSERT(p->sd > 0);
    ASSERT(p->recv_active && p->recv_ready);

    for (;;) {
        sd = accept(p->sd, NULL, NULL);
        if (sd < 0) {
            if (errno == EINTR) {
                log_debug(LOG_VERB, "accept on p %d not ready - eintr", p->sd);
                continue;
            }

            if (errno == EAGAIN || errno == EWOULDBLOCK) {
                log_debug(LOG_VERB, "accept on p %d not ready - eagain", p->sd);
                p->recv_ready = 0;
                return DN_OK;
            }

            /*
             * FIXME: On EMFILE or ENFILE mask out IN event on the proxy; mask
             * it back in when some existing connection gets closed
             */

            log_error("accept on p %d failed: %s", p->sd, strerror(errno));
            return DN_ERROR;
        }

        break;
    }

    c = conn_get(p->owner, true, p->data_store);
    if (c == NULL) {
        log_error("get conn for c %d from p %d failed: %s", sd, p->sd,
                  strerror(errno));
        status = close(sd);
        if (status < 0) {
            log_error("close c %d failed, ignored: %s", sd, strerror(errno));
        }
        return DN_ENOMEM;
    }
    c->sd = sd;

    stats_pool_incr(ctx, c->owner, client_connections);

    status = dn_set_nonblocking(c->sd);
    if (status < 0) {
        log_error("set nonblock on c %d from p %d failed: %s", c->sd, p->sd,
                  strerror(errno));
        c->close(ctx, c);
        return status;
    }

    if (p->family == AF_INET || p->family == AF_INET6) {
        status = dn_set_tcpnodelay(c->sd);
        if (status < 0) {
            log_warn("set tcpnodelay on c %d from p %d failed, ignored: %s",
                     c->sd, p->sd, strerror(errno));
        }
    }

    status = event_add_conn(ctx->evb, c);
    if (status < 0) {
        log_error("event add conn from p %d failed: %s", p->sd,
                  strerror(errno));
        c->close(ctx, c);
        return status;
    }

    log_debug(LOG_NOTICE, "accepted c %d on p %d from '%s'", c->sd, p->sd,
              dn_unresolve_peer_desc(c->sd));

    return DN_OK;
}
Example #16
/**
 * Get connection info for a given process id.
 *
 * For logical NIs the connection is contained in the rank table.
 * For physical NIs the connection is held in a binary tree using
 * the ID as a sorting value.
 *
 * For physical NIs if this is the first time we are sending a message
 * to this process create a new conn_t. For logical NIs the conn_t
 * structs are all allocated when the rank table is loaded.
 *
 * @param[in] ni the NI from which to get the connection
 * @param[in] id the process ID to lookup
 *
 * @return the conn_t and takes a reference on it
 */
conn_t *get_conn(ni_t *ni, ptl_process_t id)
{
    conn_t *conn;
    void **ret;

    if (ni->options & PTL_NI_LOGICAL) {
        if (unlikely(id.rank >= ni->logical.map_size)) {
            ptl_warn("Invalid rank (%d >= %d)\n", id.rank,
                     ni->logical.map_size);
            return NULL;
        }

        conn = ni->logical.rank_table[id.rank].connect;
        conn_get(conn);
    } else {
        conn_t conn_search;

        PTL_FASTLOCK_LOCK(&ni->physical.lock);

        /* lookup in binary tree */
        conn_search.id = id;
        ret = tfind(&conn_search, &ni->physical.tree, compare_conn_id);
        if (ret) {
            conn = *ret;
            conn_get(conn);
        } else {
            /* Not found. Allocate and insert. */
            if (conn_alloc(ni, &conn)) {
                PTL_FASTLOCK_UNLOCK(&ni->physical.lock);
                WARN();
                return NULL;
            }
#if IS_PPE || WITH_TRANSPORT_SHMEM
            //need to connect local processes over shared memory
            if (conn->id.phys.nid == ni->iface->id.phys.nid) {
                if (get_param(PTL_ENABLE_MEM)) {
#if IS_PPE
                    conn->transport = transport_mem;
#elif WITH_TRANSPORT_SHMEM
                    conn->transport = transport_shmem;
#endif
                    conn->state = CONN_STATE_CONNECTED;
                }
            }
#endif

            conn->id = id;

            /* Get the IP address from the NID. */
            conn->sin.sin_family = AF_INET;
            conn->sin.sin_addr.s_addr = nid_to_addr(id.phys.nid);
            conn->sin.sin_port = pid_to_port(id.phys.pid);

            /* insert new conn into binary tree */
            ret = tsearch(conn, &ni->physical.tree, compare_conn_id);
            if (!ret) {
                WARN();
                conn_put(conn);
                conn = NULL;
            } else {
                conn_get(conn);
            }
        }

        PTL_FASTLOCK_UNLOCK(&ni->physical.lock);
    }

    return conn;
}
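
The tfind()/tsearch() calls above rely on a comparator over process IDs. A minimal sketch of compare_conn_id, assuming conn_t nodes are ordered by the physical nid/pid pair this function already reads from id.phys:

/* Hypothetical sketch: total order on conn_t values by physical process ID,
 * first by node ID (nid), then by process ID (pid). */
static int compare_conn_id(const void *a, const void *b)
{
    const conn_t *c1 = a;
    const conn_t *c2 = b;

    if (c1->id.phys.nid != c2->id.phys.nid)
        return (c1->id.phys.nid < c2->id.phys.nid) ? -1 : 1;
    if (c1->id.phys.pid != c2->id.phys.pid)
        return (c1->id.phys.pid < c2->id.phys.pid) ? -1 : 1;
    return 0;
}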
Example #17
static rstatus_t
proxy_accept(struct context *ctx, struct conn *p)
{
    rstatus_t status;
    struct conn *c;
    int sd;
    struct sockaddr_storage addr;
    socklen_t addr_len;

    ASSERT(p->proxy && !p->client);
    ASSERT(p->sd > 0);
    ASSERT(p->recv_active && p->recv_ready);

    for (;;) {
        sd = accept(p->sd, NULL, NULL);
        if (sd < 0) {
            if (errno == EINTR) {
                log_debug(LOG_VERB, "accept on p %d not ready - eintr", p->sd);
                continue;
            }

            if (errno == EAGAIN || errno == EWOULDBLOCK || errno == ECONNABORTED) {
                log_debug(LOG_VERB, "accept on p %d not ready - eagain", p->sd);
                p->recv_ready = 0;
                return NC_OK;
            }

            /*
             * FIXME: On EMFILE or ENFILE mask out the IN event on the proxy;
             * mask it back in when some existing connection gets closed.
             *
             * Workaround for https://github.com/twitter/twemproxy/issues/97:
             * just ignore EMFILE/ENFILE; returning NC_OK lets the server
             * continue to run instead of closing the server socket.
             */
            if (errno == EMFILE || errno == ENFILE) {
                log_crit("accept on p %d failed: %s", p->sd,
                         strerror(errno));
                p->recv_ready = 0;

                log_crit("connections status: rlimit nofile %d, "
                         "used connections: %d, max client connections %d, "
                         "curr client connections %d", ctx->rlimit_nofile,
                         conn_ncurr(), ctx->max_ncconn, conn_ncurr_cconn());
                /* Since we maintain a safe max_ncconn and check it after
                 * every accept, we should not reach here; panic after this
                 * log. */
                log_panic("HIT MAX OPEN FILES, IT SHOULD NOT HAPPEN. ABORT.");

                return NC_OK;
            }

            log_error("accept on p %d failed: %s", p->sd, strerror(errno));
            return NC_ERROR;
        }
        addr_len = sizeof(addr);
        if (getsockname(sd, (struct sockaddr *)&addr, &addr_len)) {
            log_error("getsockname on p %d failed: %s", p->sd, strerror(errno));
            close(sd);
            continue;
        }

        break;
    }

    if (conn_ncurr_cconn() >= ctx->max_ncconn) {
        stats_pool_incr(ctx, p->owner, rejected_connections);

        log_crit("client connections %d exceed limit %d",
                 conn_ncurr_cconn(), ctx->max_ncconn);
        status = close(sd);
        if (status < 0) {
            log_error("close c %d failed, ignored: %s", sd, strerror(errno));
        }
        return NC_OK;
    }

    c = conn_get(p->owner, true, p->redis);
    if (c == NULL) {
        log_error("get conn for c %d from p %d failed: %s", sd, p->sd,
                  strerror(errno));
        status = close(sd);
        if (status < 0) {
            log_error("close c %d failed, ignored: %s", sd, strerror(errno));
        }
        return NC_ENOMEM;
    }
    c->sd = sd;
    c->family = addr.ss_family;
    c->addrlen = addr_len;
    c->ss = addr;
    c->addr = (struct sockaddr *)&c->ss;

    stats_pool_incr(ctx, c->owner, client_connections);

    status = nc_set_nonblocking(c->sd);
    if (status < 0) {
        log_error("set nonblock on c %d from p %d failed: %s", c->sd, p->sd,
                  strerror(errno));
        c->close(ctx, c);
        return status;
    }

    if (p->family == AF_INET || p->family == AF_INET6) {
        status = nc_set_tcpnodelay(c->sd);
        if (status < 0) {
            log_warn("set tcpnodelay on c %d from p %d failed, ignored: %s",
                     c->sd, p->sd, strerror(errno));
        }
    }

    status = event_add_conn(ctx->evb, c);
    if (status < 0) {
        log_error("event add conn from p %d failed: %s", p->sd,
                  strerror(errno));
        c->close(ctx, c);
        return status;
    }

    log_notice("accepted c %d on p %d from '%s'", c->sd, p->sd,
               nc_unresolve_peer_desc(c->sd));

    return NC_OK;
}