Esempio n. 1
0
/*
 * Called at emulator exit: terminate every async worker thread so that
 * their job queues are flushed before the runtime system shuts down.
 * Compiles to a no-op when built without thread support.
 */
void
erts_exit_flush_async(void)
{
#ifdef USE_THREADS
    int i;
    ErtsAsync a;
    /* A job whose port is NIL serves as a terminate marker; presumably
     * each worker thread exits after dequeueing one -- confirm against
     * the worker loop. */
    a.port = NIL;
    /*
     * Terminate threads in order to flush queues. We do not
     * bother to clean everything up since we are about to
     * terminate the runtime system and a cleanup would only
     * delay the termination.
     */
    /* Queue a terminate job on every worker queue first, then join all
     * threads. The single stack-allocated job is shared by all queues,
     * so it must stay live until every join below has returned. */
    for (i = 0; i < erts_async_max_threads; i++)
	async_add(&a, async_q(i));
    for (i = 0; i < erts_async_max_threads; i++)
	erts_thr_join(async->queue[i].aq.thr_id, NULL);
#endif
}
Esempio n. 2
0
/*
 * Schedule async_invoke on a worker thread.
 *
 * Arguments:
 *   key          -- present for interface compatibility; unused in this
 *                   single-queue variant (all jobs go to async_q)
 *   async_invoke -- function to run on the worker thread
 *   async_data   -- argument passed to async_invoke
 *   async_free   -- cleanup function for async_data (stored on the job)
 *
 * Returns the positive job id on success, -1 on allocation failure.
 *
 * NOTE(review): async_id is read-modify-written here without a lock;
 * the SMP variant of this function guards it with a spin lock --
 * confirm this path only runs single-threaded.
 */
long driver_async(unsigned int* key,
        void (*async_invoke)(void*), void* async_data,
        void (*async_free)(void*)) {
    ErlAsync* a = malloc(sizeof *a);  /* no cast needed in C */
    long id;

    if (a == NULL)
        return -1;  /* out of memory: report error instead of crashing */

    a->next = NULL;
    a->prev = NULL;
    a->async_data = async_data;
    a->async_invoke = async_invoke;
    a->async_free = async_free;

    /* Job ids cycle through 1..0x7fffffff; 0 is skipped so it can act
     * as a "no job" sentinel. */
    async_id = (async_id + 1) & 0x7fffffff;
    if (async_id == 0)
        async_id++;
    id = async_id;
    a->async_id = id;
    async_add(a, async_q);
    return id;
}
Esempio n. 3
0
/*
 * Shut down the async-thread subsystem: queue one terminate job
 * (port == NIL) per worker thread, then join each thread and destroy
 * its queue's mutex and condition variable. Always returns 0.
 *
 * NOTE(review): the terminate jobs are heap-allocated here and not
 * freed in this function; presumably each worker frees the job it
 * dequeues -- confirm in the worker loop.
 */
int exit_async()
{
    int i;

    /* terminate threads */
    for (i = 0; i < erts_async_max_threads; i++) {
        ErlAsync* a = (ErlAsync*) erts_alloc(ERTS_ALC_T_ASYNC,
                                             sizeof(ErlAsync));
        a->port = NIL;
        async_add(a, &async_q[i]);
    }

    for (i = 0; i < erts_async_max_threads; i++) {
        erts_thr_join(async_q[i].thr, NULL);
        erts_mtx_destroy(&async_q[i].mtx);
        erts_cnd_destroy(&async_q[i].cv);
    }
#ifndef ERTS_SMP
    erts_mtx_destroy(&async_ready_mtx);
#endif
    if (async_q)
        erts_free(ERTS_ALC_T_ASYNC_Q, (void *) async_q);
    return 0;
}
Esempio n. 4
0
/*
** Schedule async_invoke on a worker thread
** NOTE will be synchronous when threads are unsupported
** return values:
**  0  completed
**  -1 error
**  N  handle value (used with async_cancel)
**  arguments:
**      ix             driver index
**      key            pointer to schedule queue (NULL means round robin)
**      async_invoke   function to run in thread
**      async_data     data to pass to invoke function
**      async_free     function for releasing async_data in case of failure
*/
long driver_async(ErlDrvPort ix, unsigned int* key,
                  void (*async_invoke)(void*), void* async_data,
                  void (*async_free)(void*))
{
    ErlAsync* a;
    Port* prt = erts_drvport2port(ix);
    long id;
    unsigned int qix;

    /* Validate the port before allocating the job record, so the early
     * error return cannot leak an ErlAsync allocation. */
    if (!prt)
        return -1;

    ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));

    a = (ErlAsync*) erts_alloc(ERTS_ALC_T_ASYNC, sizeof(ErlAsync));

    a->next = NULL;
    a->prev = NULL;
    a->hndl = (DE_Handle*)prt->drv_ptr->handle;
    a->port = prt->id;
    a->pdl = NULL;
    a->async_data = async_data;
    a->async_invoke = async_invoke;
    a->async_free = async_free;

    /* Generate the next job id under the spin lock; ids cycle through
     * 1..0x7fffffff (0 is skipped as a sentinel). */
    erts_smp_spin_lock(&async_id_lock);
    async_id = (async_id + 1) & 0x7fffffff;
    if (async_id == 0)
        async_id++;
    id = async_id;
    erts_smp_spin_unlock(&async_id_lock);

    a->async_id = id;

    /* Choose a worker queue: round robin by job id, or pinned via *key
     * (which is updated to the queue actually chosen). */
    if (key == NULL) {
        qix = (erts_async_max_threads > 0)
              ? (id % erts_async_max_threads) : 0;
    }
    else {
        qix = (erts_async_max_threads > 0) ?
              (*key % erts_async_max_threads) : 0;
        *key = qix;
    }
#ifdef USE_THREADS
    if (erts_async_max_threads > 0) {
        /* Keep the port data lock alive while the job is in flight. */
        if (prt->port_data_lock) {
            driver_pdl_inc_refc(prt->port_data_lock);
            a->pdl = prt->port_data_lock;
        }
        async_add(a, &async_q[qix]);
        return id;
    }
#endif

    /* No worker threads: run the job synchronously on this thread. */
    (*a->async_invoke)(a->async_data);

    if (async_ready(prt, a->async_data)) {
        if (a->async_free != NULL)
            (*a->async_free)(a->async_data);
    }
    erts_free(ERTS_ALC_T_ASYNC, (void *) a);

    return id;
}
Esempio n. 5
0
File: proxy.c Progetto: mlzboy/resys
void uwsgi_proxy(int proxyfd) {

	int efd ;

#ifdef __linux__
	struct epoll_event *eevents;
	struct epoll_event ev;
#elif defined(__sun__)
	struct pollfd *eevents;
	struct pollfd ev;
#else
	struct kevent *eevents;
	struct kevent ev;
#endif

	int max_events = 64;
	int nevents, i;
	const int nonblocking = 1;
	const int blocking = 0;

	char buffer[4096];
	ssize_t rlen;
	ssize_t wlen;
	int max_connections = sysconf(_SC_OPEN_MAX);

	int soopt;
	socklen_t solen = sizeof(int);

	int rc;

	struct uwsgi_proxy_connection *upcs;

	struct sockaddr_in upc_addr;
	socklen_t upc_len = sizeof(struct sockaddr_in);

	int next_node = -1;

	fprintf(stderr, "spawned uWSGI proxy (pid: %d)\n", getpid());

	fprintf(stderr, "allocating space for %d concurrent proxy connections\n", max_connections);

	// allocate memory for connections
	upcs = malloc(sizeof(struct uwsgi_proxy_connection) * max_connections);
	if (!upcs) {
		uwsgi_error("malloc()");
		exit(1);
	}
	memset(upcs, 0, sizeof(struct uwsgi_proxy_connection) * max_connections);


	efd = async_queue_init(proxyfd);
	if (efd < 0) {
		exit(1);
	}

#ifdef __linux__
	eevents = malloc(sizeof(struct epoll_event) * max_events);
	memset(&ev, 0, sizeof(struct epoll_event)); 
#elif defined(__sun)
	eevents = malloc(sizeof(struct pollfd) * max_events);
	memset(&ev, 0, sizeof(struct pollfd)); 
#else
	eevents = malloc(sizeof(struct kevent) * max_events);
	memset(&ev, 0, sizeof(struct kevent)); 
#endif

	if (!eevents) {
		uwsgi_error("malloc()");
		exit(1);
	}

	signal(SIGINT, (void *) &end_proxy);
	signal(SIGTERM, (void *) &reload_proxy);
	signal(SIGHUP, (void *) &reload_proxy);
	// and welcome to the loop...

	for (;;) {

		nevents = async_wait(efd, eevents, max_events, -1, 0);
		if (nevents < 0) {
			uwsgi_error("epoll_wait()");
			continue;
		}

		for (i = 0; i < nevents; i++) {


			if (eevents[i].ASYNC_FD == proxyfd) {

				if (eevents[i].ASYNC_IS_IN) {
					// new connection, accept it
					ev.ASYNC_FD = accept(proxyfd, (struct sockaddr *) &upc_addr, &upc_len);
					if (ev.ASYNC_FD < 0) {
						uwsgi_error("accept()");
						continue;
					}
					upcs[ev.ASYNC_FD].node = -1;

					// now connect to the first worker available

					upcs[ev.ASYNC_FD].dest_fd = socket(AF_INET, SOCK_STREAM, 0);
					if (upcs[ev.ASYNC_FD].dest_fd < 0) {
						uwsgi_error("socket()");
						uwsgi_proxy_close(upcs, ev.ASYNC_FD);
						continue;
					}
					upcs[upcs[ev.ASYNC_FD].dest_fd].node = -1;

					// set nonblocking
					if (ioctl(upcs[ev.ASYNC_FD].dest_fd, FIONBIO, &nonblocking)) {
						uwsgi_error("ioctl()");
						uwsgi_proxy_close(upcs, ev.ASYNC_FD);
						continue;
					}

					upcs[ev.ASYNC_FD].status = 0;
					upcs[ev.ASYNC_FD].retry = 0;
					next_node = uwsgi_proxy_find_next_node(next_node);
					if (next_node == -1) {
						fprintf(stderr, "unable to find an available worker in the cluster !\n");
						uwsgi_proxy_close(upcs, ev.ASYNC_FD);
						continue;
					}
					upcs[upcs[ev.ASYNC_FD].dest_fd].node = next_node;
					rc = connect(upcs[ev.ASYNC_FD].dest_fd, (struct sockaddr *) &uwsgi.shared->nodes[next_node].ucn_addr, sizeof(struct sockaddr_in));
					uwsgi.shared->nodes[next_node].connections++;

					if (!rc) {
						// connected to worker, put it in the epoll_list

						if (async_add(efd, ev.ASYNC_FD, ASYNC_IN)) {
							uwsgi_proxy_close(upcs, ev.ASYNC_FD);
							continue;
						}

						upcs[upcs[ev.ASYNC_FD].dest_fd].dest_fd = ev.ASYNC_FD;
						upcs[upcs[ev.ASYNC_FD].dest_fd].status = 0;
						upcs[upcs[ev.ASYNC_FD].dest_fd].retry = 0;

						ev.ASYNC_FD = upcs[ev.ASYNC_FD].dest_fd;

						if (async_add(efd, ev.ASYNC_FD, ASYNC_IN)) {
							uwsgi_proxy_close(upcs, ev.ASYNC_FD);
							continue;
						}

						// re-set blocking
						if (ioctl(upcs[upcs[ev.ASYNC_FD].dest_fd].dest_fd, FIONBIO, &blocking)) {
							uwsgi_error("ioctl()");
							uwsgi_proxy_close(upcs, ev.ASYNC_FD);
							continue;
						}

					}
					else if (errno == EINPROGRESS) {
						// the socket is waiting, set status to CONNECTING
						upcs[ev.ASYNC_FD].status = UWSGI_PROXY_WAITING;
						upcs[upcs[ev.ASYNC_FD].dest_fd].dest_fd = ev.ASYNC_FD;
						upcs[upcs[ev.ASYNC_FD].dest_fd].status = UWSGI_PROXY_CONNECTING;
						upcs[upcs[ev.ASYNC_FD].dest_fd].retry = 0;

						ev.ASYNC_FD = upcs[ev.ASYNC_FD].dest_fd;
						if (async_add(efd, ev.ASYNC_FD, ASYNC_OUT)) {
							uwsgi_proxy_close(upcs, ev.ASYNC_FD);
							continue;
						}
					}
					else {
						// connection failed, retry with the next node ?
						uwsgi_error("connect()");
						// close only when all node are tried
						uwsgi_proxy_close(upcs, ev.ASYNC_FD);
						continue;
					}


				}
				else {
					fprintf(stderr, "!!! something horrible happened to the uWSGI proxy, reloading it !!!\n");
					exit(1);
				}
			}
			else {
				// this is for clients/workers
				if (eevents[i].ASYNC_IS_IN) {

					// is this a connected client/worker ?
					//fprintf(stderr,"ready %d\n", upcs[eevents[i].data.fd].status);

					if (!upcs[eevents[i].ASYNC_FD].status) {
						if (upcs[eevents[i].ASYNC_FD].dest_fd >= 0) {

							rlen = read(eevents[i].ASYNC_FD, buffer, 4096);
							if (rlen < 0) {
								uwsgi_error("read()");
								uwsgi_proxy_close(upcs, eevents[i].ASYNC_FD);
								continue;
							}
							else if (rlen == 0) {
								uwsgi_proxy_close(upcs, eevents[i].ASYNC_FD);
								continue;
							}
							else {
								wlen = write(upcs[eevents[i].ASYNC_FD].dest_fd, buffer, rlen);
								if (wlen != rlen) {
									uwsgi_error("write()");
									uwsgi_proxy_close(upcs, eevents[i].ASYNC_FD);
									continue;
								}
							}
						}
						else {
							uwsgi_proxy_close(upcs, eevents[i].ASYNC_FD);
							continue;
						}
					}
					else if (upcs[eevents[i].ASYNC_FD].status == UWSGI_PROXY_WAITING) {
						// disconnected node
						continue;
					}
					else {
						fprintf(stderr, "UNKNOWN STATUS %d\n", upcs[eevents[i].ASYNC_FD].status);
						continue;
					}
				}
				else if (eevents[i].ASYNC_IS_OUT) {
					if (upcs[eevents[i].ASYNC_FD].status == UWSGI_PROXY_CONNECTING) {


#ifdef UWSGI_PROXY_USE_KQUEUE
						if (getsockopt(eevents[i].ASYNC_FD, SOL_SOCKET, SO_ERROR, (void *) (&soopt), &solen) < 0) {
							uwsgi_error("getsockopt()");
							uwsgi_proxy_close(upcs, ev.ASYNC_FD);
							continue;
						}
						/* is something bad ? */
						if (soopt) {
							fprintf(stderr, "connect() %s\n", strerror(soopt));
							// increase errors on node
							fprintf(stderr, "*** marking cluster node %d/%s as failed ***\n", upcs[eevents[i].ASYNC_FD].node, uwsgi.shared->nodes[upcs[eevents[i].ASYNC_FD].node].name);
							uwsgi.shared->nodes[upcs[eevents[i].ASYNC_FD].node].errors++;
							uwsgi.shared->nodes[upcs[eevents[i].ASYNC_FD].node].status = UWSGI_NODE_FAILED;
							uwsgi_proxy_close(upcs, ev.ASYNC_FD);
							continue;
						}

						// increase errors on node
#endif
						ev.ASYNC_FD = upcs[eevents[i].ASYNC_FD].dest_fd;
						upcs[ev.ASYNC_FD].status = 0;
						if (async_add(efd, ev.ASYNC_FD, ASYNC_IN)) {
							uwsgi_proxy_close(upcs, ev.ASYNC_FD);
							continue;
						}

						ev.ASYNC_FD = upcs[ev.ASYNC_FD].dest_fd;
						upcs[ev.ASYNC_FD].status = 0;

						if (async_mod(efd, ev.ASYNC_FD, ASYNC_IN)) {
							uwsgi_proxy_close(upcs, ev.ASYNC_FD);
							continue;
						}
						// re-set blocking
						if (ioctl(ev.ASYNC_FD, FIONBIO, &blocking)) {
							uwsgi_error("ioctl()");
							uwsgi_proxy_close(upcs, ev.ASYNC_FD);
							continue;
						}
					}
					else {
						fprintf(stderr, "strange event for %d\n", (int) eevents[i].ASYNC_FD);
					}
				}
				else {
					if (upcs[eevents[i].ASYNC_FD].status == UWSGI_PROXY_CONNECTING) {
						if (getsockopt(eevents[i].ASYNC_FD, SOL_SOCKET, SO_ERROR, (void *) (&soopt), &solen) < 0) {
							uwsgi_error("getsockopt()");
						}
						/* is something bad ? */
						if (soopt) {
							fprintf(stderr, "connect() %s\n", strerror(soopt));
						}

						// increase errors on node
						fprintf(stderr, "*** marking cluster node %d/%s as failed ***\n", upcs[eevents[i].ASYNC_FD].node, uwsgi.shared->nodes[upcs[eevents[i].ASYNC_FD].node].name);
						uwsgi.shared->nodes[upcs[eevents[i].ASYNC_FD].node].errors++;
						uwsgi.shared->nodes[upcs[eevents[i].ASYNC_FD].node].status = UWSGI_NODE_FAILED;
					}
					else {
						fprintf(stderr, "STRANGE EVENT !!! %d %d %d\n", (int) eevents[i].ASYNC_FD, (int) eevents[i].ASYNC_EV, upcs[eevents[i].ASYNC_FD].status);
					}
					uwsgi_proxy_close(upcs, eevents[i].ASYNC_FD);
					continue;
				}
			}
		}
	}
}
Esempio n. 6
0
/*
** Schedule async_invoke on a worker thread
** NOTE will be synchronous when threads are unsupported
** return values:
**  0  completed 
**  -1 error
**  N  handle value
**  arguments:
**      ix             driver index 
**      key            pointer to schedule queue (NULL means round robin)
**      async_invoke   function to run in thread
**      async_data     data to pass to invoke function
**      async_free     function for releasing async_data in case of failure
*/
long driver_async(ErlDrvPort ix, unsigned int* key,
		  void (*async_invoke)(void*), void* async_data,
		  void (*async_free)(void*))
{
    ErtsAsync* a;
    Port* prt;
    long id;
    unsigned int qix;
#if ERTS_USE_ASYNC_READY_Q
    Uint sched_id;
    ERTS_MSACC_PUSH_STATE();

    /* Remember which scheduler issued the job so completion can be
     * routed back to its ready queue; 0 means "no scheduler", mapped
     * to 1. */
    sched_id = erts_get_scheduler_id();
    if (!sched_id)
	sched_id = 1;
#else
    ERTS_MSACC_PUSH_STATE();
#endif

    /* Validate the port before allocating the job record. */
    prt = erts_drvport2port(ix);
    if (prt == ERTS_INVALID_ERL_DRV_PORT)
	return -1;

    ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));

    a = (ErtsAsync*) erts_alloc(ERTS_ALC_T_ASYNC, sizeof(ErtsAsync));

#if ERTS_USE_ASYNC_READY_Q
    a->sched_id = sched_id;
#endif
    a->hndl = (DE_Handle*)prt->drv_ptr->handle;
    a->port = prt->common.id;
    a->pdl = NULL;
    a->async_data = async_data;
    a->async_invoke = async_invoke;
    a->async_free = async_free;

    /* Job id: 0 when the async subsystem is not initialized; otherwise
     * a lock-free atomic increment, skipping 0 and folding negatives
     * to positive so the id is always > 0. */
    if (!async)
	id = 0;
    else {
	do {
	    id = erts_atomic_inc_read_nob(&async->init.data.id);
	} while (id == 0);
	if (id < 0)
	    id *= -1;
	ASSERT(id > 0);
    }

    a->async_id = id;

    /* Choose a worker queue: round robin by job id, or pinned via *key
     * (which is updated to the queue actually chosen). */
    if (key == NULL) {
	qix = (erts_async_max_threads > 0)
	    ? (id % erts_async_max_threads) : 0;
    }
    else {
	qix = (erts_async_max_threads > 0) ? 
	    (*key % erts_async_max_threads) : 0;
	*key = qix;
    }
#ifdef USE_THREADS
    if (erts_async_max_threads > 0) {
	/* Keep the port data lock alive while the job is in flight. */
	if (prt->port_data_lock) {
	    driver_pdl_inc_refc(prt->port_data_lock);
	    a->pdl = prt->port_data_lock;
	}
	async_add(a, async_q(qix));
	return id;
    }
#endif
    
    /* No worker threads: run the job synchronously, accounting the
     * time as port work via the MSACC state push/pop pairs. */
    ERTS_MSACC_SET_STATE_CACHED(ERTS_MSACC_STATE_PORT);
    (*a->async_invoke)(a->async_data);
    ERTS_MSACC_POP_STATE();

    if (async_ready(prt, a->async_data)) {
	if (a->async_free != NULL) {
            ERTS_MSACC_SET_STATE_CACHED(ERTS_MSACC_STATE_PORT);
	    (*a->async_free)(a->async_data);
            ERTS_MSACC_POP_STATE();
        }
    }
    erts_free(ERTS_ALC_T_ASYNC, (void *) a);

    return id;
}