Example #1
42
void* console_thread_func(void*)
{
    /*Start logs*/

    while(bConsoleActive)
    {
        while(!qConsoleMessages.empty())
        {

            console_msg front_msg = qConsoleMessages.front();
            gdk_threads_enter();
            GtkTextIter console_end_iter;
            gtk_text_buffer_get_end_iter(console_buffer, &console_end_iter); /*Get end of console*/

            if(front_msg.type != important_online && front_msg.type != important_offline && front_msg.type != motd){

                gtk_text_buffer_insert_with_tags(console_buffer, &console_end_iter, front_msg.from_who.c_str(), strlen(front_msg.from_who.c_str()), console_format_normal, NULL);
                gtk_text_buffer_get_end_iter(console_buffer, &console_end_iter); /*Get end again*/

                gtk_text_buffer_insert_with_tags(console_buffer, &console_end_iter, ": ", strlen(": "), console_format_normal, NULL);/*Formating*/
                gtk_text_buffer_get_end_iter(console_buffer, &console_end_iter);
            }
            switch(front_msg.type){ //from_who uses console_format_normal

                case notification:
                    gtk_text_buffer_insert_with_tags(console_buffer, &console_end_iter, front_msg.strMessage.c_str(), strlen(front_msg.strMessage.c_str()), console_format_notification, NULL);
                break;

                case error:
                    gtk_text_buffer_insert_with_tags(console_buffer, &console_end_iter, front_msg.strMessage.c_str(), strlen(front_msg.strMessage.c_str()), console_format_error, NULL);
                break;

                case warning:
                    gtk_text_buffer_insert_with_tags(console_buffer, &console_end_iter, front_msg.strMessage.c_str(), strlen(front_msg.strMessage.c_str()), console_format_warning, NULL);
                break;

                case important_online:

                    gtk_text_buffer_insert_with_tags(console_buffer, &console_end_iter, front_msg.strMessage.c_str(), strlen(front_msg.strMessage.c_str()), console_format_important_online, NULL);

                break;

                case important_offline:

                    gtk_text_buffer_insert_with_tags(console_buffer, &console_end_iter, front_msg.strMessage.c_str(), strlen(front_msg.strMessage.c_str()), console_format_important_offline, NULL);

                break;

                case motd:

                    gtk_text_buffer_insert_with_tags(console_buffer, &console_end_iter, front_msg.strMessage.c_str(), strlen(front_msg.strMessage.c_str()), console_format_motd, NULL);
                    gtk_text_buffer_get_end_iter(console_buffer, &console_end_iter);
                    gtk_text_buffer_insert(console_buffer, &console_end_iter, "\n", strlen("\n"));

                break;
            }


            gtk_text_buffer_get_end_iter(console_buffer, &console_end_iter);
            gtk_text_buffer_insert(console_buffer, &console_end_iter, "\n", strlen("\n"));
            gtk_text_buffer_get_end_iter(console_buffer, &console_end_iter);
            gtk_text_view_scroll_to_iter((GtkTextView*)console_text, &console_end_iter, 0, 0, 0, 0);
            gdk_threads_leave();
            qConsoleMessages.pop(); /* pop the handled message */
            poll(0, 0, CONSOLE_DELAY); /* Throttle output so the console isn't flooded */
        }


    }

    /*Clean up and save logs here*/
    return NULL;
}
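The example above serializes GTK calls from the worker thread with the GTK 2-era gdk_threads_enter()/gdk_threads_leave() pair. A commonly used alternative is to hand the buffer update to the main loop with g_idle_add(); the sketch below reuses the example's console_msg type, console_buffer and tag globals, and append_console_msg_idle is a hypothetical helper, not existing API.

/* Sketch: let the GTK main loop perform the text-buffer update. */
static gboolean append_console_msg_idle(gpointer data)
{
    console_msg *msg = static_cast<console_msg*>(data); /* heap copy made by the worker */
    GtkTextIter end;

    gtk_text_buffer_get_end_iter(console_buffer, &end);
    gtk_text_buffer_insert_with_tags(console_buffer, &end,
                                     msg->strMessage.c_str(), -1, /* -1: NUL-terminated */
                                     console_format_normal, NULL);

    delete msg;
    return FALSE; /* one-shot: remove the idle source */
}

/* Worker thread side, instead of gdk_threads_enter()/leave():
 *     g_idle_add(append_console_msg_idle, new console_msg(front_msg));
 */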
Example #2
1
static void *
manager_output_loop( DirectThread *thread, void *arg )
{
     int            len;
     struct pollfd  pf;
     VoodooManager *manager = arg;

     while (!manager->quit) {
          D_MAGIC_ASSERT( manager, VoodooManager );

          pf.fd     = manager->fd;
          pf.events = POLLOUT;

          switch (poll( &pf, 1, 100 )) {
               case -1:
                    if (errno != EINTR) {
                         D_PERROR( "Voodoo/Output: Could not poll() the socket!\n" );
                         usleep( 200000 );
                    }
                    /* fall through */

               case 0:
                    continue;
          }

          pthread_mutex_lock( &manager->output.lock );

          while (manager->output.start == manager->output.end) {
               struct timeval  now;
               struct timespec timeout;

               D_ASSUME( manager->output.start == 0 );
               D_ASSUME( manager->output.end == 0 );

               gettimeofday( &now, NULL );

               timeout.tv_sec  = now.tv_sec;
               timeout.tv_nsec = (now.tv_usec + 50000) * 1000;

               timeout.tv_sec  += timeout.tv_nsec / 1000000000;
               timeout.tv_nsec %= 1000000000;

               pthread_cond_timedwait( &manager->output.wait, &manager->output.lock, &timeout );

               if (manager->quit)
                    break;
          }

          if (!manager->quit) {
               len = send( manager->fd, manager->output.buffer + manager->output.start,
                           manager->output.end - manager->output.start, MSG_DONTWAIT );
               if (len < 0) {
                    switch (errno) {
                         case EINTR:
                         case EAGAIN:
                              break;
                         default:
                              D_PERROR( "Voodoo/Output: Could not send() data!\n" );
                              usleep( 200000 );
                    }
               }
               else {
                    D_DEBUG( "Voodoo/Output: Sent %d/%d bytes...\n", len, manager->output.end - manager->output.start );

                    manager->output.start += len;

                    if (manager->output.start == manager->output.end) {
                         manager->output.start = manager->output.end = 0;

                         pthread_cond_broadcast( &manager->output.wait );
                    }
               }
          }

          pthread_mutex_unlock( &manager->output.lock );
     }

     return NULL;
}
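The gettimeofday()-based block above builds an absolute deadline 50 ms in the future for pthread_cond_timedwait(). A minimal sketch of the same computation with clock_gettime(), which yields a struct timespec directly (pthread condition variables use CLOCK_REALTIME unless configured otherwise):

/* Sketch: absolute 50 ms deadline for pthread_cond_timedwait(). */
#include <time.h>

static void deadline_in_50ms(struct timespec *ts)
{
     clock_gettime(CLOCK_REALTIME, ts);

     ts->tv_nsec += 50 * 1000 * 1000;      /* add 50 ms */
     ts->tv_sec  += ts->tv_nsec / 1000000000L;
     ts->tv_nsec %= 1000000000L;           /* normalize back below 1 s */
}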
Example #3
0
int multipipe(int size, int fd[][2])
{
	int i, r, w, p;

	char ** buffers;
	if ((buffers = malloc(size * sizeof(char*))) == NULL) {
		perror("malloc");
		return -1;
	}
	for (i = 0 ; i < size ; i++) {
		if ((buffers[i] = malloc(BUFSIZE * sizeof(char))) == NULL) {
			perror("malloc");
			return -1;
		}
	}

	int * c;
	if ((c = malloc(size * sizeof(int))) == NULL) {
		perror("malloc");
		return -1;
	}
	for (i = 0 ; i < size ; i++) {
		c[i] = 0;
	}

	int * t;
	if ((t = malloc(size * sizeof(int))) == NULL) {
		perror("malloc");
		return -1;
	}

	struct pollfd * pfd = NULL;
	if ((pfd = malloc(2 * size * sizeof(struct pollfd))) == NULL) {
		perror("malloc");
		exit(1);
	}

	while (1) {

		p = 0;
		for (i = 0 ; i < size ; i++) {
			if (c[i] < BUFSIZE) {
				pfd[p].fd = fd[i][0];
				pfd[p].events = POLLIN;
				pfd[p].revents = 0;
				t[p] = i;
				p++;
			}

			if (c[i] > 0) {
				pfd[p].fd = fd[i][1];
				pfd[p].events = POLLOUT;
				pfd[p].revents = 0;
				t[p] = i;
				p++;
			}
		}

		if ((r = poll(pfd, p, -1)) <= 0) {
			if (r == 0 || errno == EINTR)
				continue;
			perror("poll");
			return -1;
		}

		for (i = 0 ; i < p ; i++) {
			if (pfd[i].revents & POLLIN) {
				/* Append new data after the c[t[i]] bytes already buffered. */
				if ((r = read(fd[t[i]][0], buffers[t[i]] + c[t[i]], BUFSIZE - c[t[i]])) < 0) {
					if (errno != EAGAIN && errno != EWOULDBLOCK) {
						perror("read");
						return -1;
					}
					r = 0;	/* spurious wakeup: nothing was read */
				}
				c[t[i]] += r;
			}
			if (pfd[i].revents & POLLOUT) {
				if ((w = write(fd[t[i]][1], buffers[t[i]], c[t[i]])) < 0) {
					perror("write");
					return -1;
				}
				if (w != 0) {
					c[t[i]] -= w;
					memmove(buffers[t[i]], buffers[t[i]] + w, c[t[i]]);
				}
			}
		}
	}
}
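The EAGAIN/EWOULDBLOCK handling in the read path only makes sense if the descriptors are non-blocking, which multipipe() itself does not arrange. A small helper a caller might use to set that up (a sketch, not part of the original):

/* Sketch: put a descriptor into non-blocking mode before handing it to multipipe(). */
#include <fcntl.h>

static int set_nonblocking(int fd)
{
	int flags = fcntl(fd, F_GETFL, 0);

	if (flags < 0)
		return -1;
	return fcntl(fd, F_SETFL, flags | O_NONBLOCK);
}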
Example #4
0
int read_gps_mtk(int *fd, char *gps_rx_buffer, int buffer_size)  // returns 1 if the thread should terminate
{
//	printf("in read_gps_mtk\n");
	uint8_t ret = 0;

	uint8_t c;

	int rx_count = 0;
	int gpsRxOverflow = 0;

	struct pollfd fds;
	fds.fd = *fd;
	fds.events = POLLIN;

	// This blocks the task until there is something on the buffer
	while (1) {
		//check if the thread should terminate
		if (terminate_gps_thread == true) {
//			printf("terminate_gps_thread=%u ", terminate_gps_thread);
//			printf("exiting mtk thread\n");
//			fflush(stdout);
			ret = 1;
			break;
		}

		if (poll(&fds, 1, 1000) > 0) {
			if (read(*fd, &c, 1) > 0) {
//				printf("Read %x\n",c);
				if (rx_count >= buffer_size) {
					// The buffer is already full and we haven't found a valid NMEA sentence.
					// Flush the buffer and note the overflow event.
					gpsRxOverflow++;
					rx_count = 0;
					mtk_decode_init();

					if (gps_verbose) printf("[gps] Buffer full\r\n");

				} else {
					//gps_rx_buffer[rx_count] = c;
					rx_count++;

				}

				int msg_read = mtk_parse(c, gps_rx_buffer);

				if (msg_read > 0) {
					//			printf("Found sequence\n");
					break;
				}

			} else {
				break;
			}

		} else {
			break;
		}

	}

	return ret;
}
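The loop above assumes *fd is already an open, byte-oriented serial port. A typical setup sketch with termios is shown below; the B38400 baud rate is a placeholder, not something taken from the example.

/* Sketch: configure the GPS serial port as a raw byte stream. */
#include <termios.h>

static int setup_gps_port(int fd)
{
	struct termios tio;

	if (tcgetattr(fd, &tio) < 0)
		return -1;

	cfmakeraw(&tio);            /* no line editing, no signal chars */
	cfsetispeed(&tio, B38400);  /* placeholder baud rate */
	cfsetospeed(&tio, B38400);

	return tcsetattr(fd, TCSANOW, &tio);
}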
Example #5
0
int zmq_poll (zmq_pollitem_t *items_, int nitems_, long timeout_)
{
    if (!items_) {
        errno = EFAULT;
        return -1;
    }
#if defined ZMQ_POLL_BASED_ON_POLL
    if (unlikely (nitems_ < 0)) {
        errno = EINVAL;
        return -1;
    }
    if (unlikely (nitems_ == 0)) {
        if (timeout_ == 0)
            return 0;
#if defined ZMQ_HAVE_WINDOWS
        Sleep (timeout_ > 0 ? timeout_ : INFINITE);
        return 0;
#elif defined ZMQ_HAVE_ANDROID
        usleep (timeout_ * 1000);
        return 0;
#else
        return usleep (timeout_ * 1000);
#endif
    }
    zmq::clock_t clock;
    uint64_t now = 0;
    uint64_t end = 0;

    pollfd *pollfds = (pollfd*) malloc (nitems_ * sizeof (pollfd));
    alloc_assert (pollfds);

    //  Build pollset for poll () system call.
    for (int i = 0; i != nitems_; i++) {

        //  If the poll item is a 0MQ socket, we poll on the file descriptor
        //  retrieved by the ZMQ_FD socket option.
        if (items_ [i].socket) {
            size_t zmq_fd_size = sizeof (zmq::fd_t);
            if (zmq_getsockopt (items_ [i].socket, ZMQ_FD, &pollfds [i].fd,
                &zmq_fd_size) == -1) {
                free (pollfds);
                return -1;
            }
            pollfds [i].events = items_ [i].events ? POLLIN : 0;
        }
        //  Else, the poll item is a raw file descriptor. Just convert the
        //  events to normal POLLIN/POLLOUT for poll ().
        else {
            pollfds [i].fd = items_ [i].fd;
            pollfds [i].events =
                (items_ [i].events & ZMQ_POLLIN ? POLLIN : 0) |
                (items_ [i].events & ZMQ_POLLOUT ? POLLOUT : 0);
        }
    }

    bool first_pass = true;
    int nevents = 0;

    while (true) {

         //  Compute the timeout for the subsequent poll.
         int timeout;
         if (first_pass)
             timeout = 0;
         else if (timeout_ < 0)
             timeout = -1;
         else
             timeout = end - now;

        //  Wait for events.
        while (true) {
            int rc = poll (pollfds, nitems_, timeout);
            if (rc == -1 && errno == EINTR) {
                free (pollfds);
                return -1;
            }
            errno_assert (rc >= 0);
            break;
        }

        //  Check for the events.
        for (int i = 0; i != nitems_; i++) {

            items_ [i].revents = 0;

            //  The poll item is a 0MQ socket. Retrieve pending events
            //  using the ZMQ_EVENTS socket option.
            if (items_ [i].socket) {
                size_t zmq_events_size = sizeof (uint32_t);
                uint32_t zmq_events;
                if (zmq_getsockopt (items_ [i].socket, ZMQ_EVENTS, &zmq_events,
                    &zmq_events_size) == -1) {
                    free (pollfds);
                    return -1;
                }
                if ((items_ [i].events & ZMQ_POLLOUT) &&
                      (zmq_events & ZMQ_POLLOUT))
                    items_ [i].revents |= ZMQ_POLLOUT;
                if ((items_ [i].events & ZMQ_POLLIN) &&
                      (zmq_events & ZMQ_POLLIN))
                    items_ [i].revents |= ZMQ_POLLIN;
            }
            //  Else, the poll item is a raw file descriptor, simply convert
            //  the events to zmq_pollitem_t-style format.
            else {
                if (pollfds [i].revents & POLLIN)
                    items_ [i].revents |= ZMQ_POLLIN;
                if (pollfds [i].revents & POLLOUT)
                    items_ [i].revents |= ZMQ_POLLOUT;
                if (pollfds [i].revents & ~(POLLIN | POLLOUT))
                    items_ [i].revents |= ZMQ_POLLERR;
            }

            if (items_ [i].revents)
                nevents++;
        }

        //  If timeout is zero, exit immediately whether there are events or not.
        if (timeout_ == 0)
            break;

        //  If there are events to return, we can exit immediately.
        if (nevents)
            break;

        //  At this point we are meant to wait for events but there are none.
        //  If timeout is infinite we can just loop until we get some events.
        if (timeout_ < 0) {
            if (first_pass)
                first_pass = false;
            continue;
        }

        //  The timeout is finite and there are no events. In the first pass
        //  we take a timestamp of when the polling began (we assume that the
        //  first pass took negligible time). We also compute the time
        //  when the polling should time out.
        if (first_pass) {
            now = clock.now_ms ();
            end = now + timeout_;
            if (now == end)
                break;
            first_pass = false;
            continue;
        }

        //  Find out whether the timeout has expired.
        now = clock.now_ms ();
        if (now >= end)
            break;
    }

    free (pollfds);
    return nevents;

#elif defined ZMQ_POLL_BASED_ON_SELECT

    if (unlikely (nitems_ < 0)) {
        errno = EINVAL;
        return -1;
    }
    if (unlikely (nitems_ == 0)) {
        if (timeout_ == 0)
            return 0;
#if defined ZMQ_HAVE_WINDOWS
        Sleep (timeout_ > 0 ? timeout_ : INFINITE);
        return 0;
#else
        return usleep (timeout_ * 1000);
#endif
    }
    zmq::clock_t clock;
    uint64_t now = 0;
    uint64_t end = 0;

    //  Ensure we do not attempt to select () on more than FD_SETSIZE
    //  file descriptors.
    zmq_assert (nitems_ <= FD_SETSIZE);

    fd_set pollset_in;
    FD_ZERO (&pollset_in);
    fd_set pollset_out;
    FD_ZERO (&pollset_out);
    fd_set pollset_err;
    FD_ZERO (&pollset_err);

    zmq::fd_t maxfd = 0;

    //  Build the fd_sets for passing to select ().
    for (int i = 0; i != nitems_; i++) {

        //  If the poll item is a 0MQ socket we are interested in input on the
        //  notification file descriptor retrieved by the ZMQ_FD socket option.
        if (items_ [i].socket) {
            size_t zmq_fd_size = sizeof (zmq::fd_t);
            zmq::fd_t notify_fd;
            if (zmq_getsockopt (items_ [i].socket, ZMQ_FD, &notify_fd,
                &zmq_fd_size) == -1)
                return -1;
            if (items_ [i].events) {
                FD_SET (notify_fd, &pollset_in);
                if (maxfd < notify_fd)
                    maxfd = notify_fd;
            }
        }
        //  Else, the poll item is a raw file descriptor. Convert the poll item
        //  events to the appropriate fd_sets.
        else {
            if (items_ [i].events & ZMQ_POLLIN)
                FD_SET (items_ [i].fd, &pollset_in);
            if (items_ [i].events & ZMQ_POLLOUT)
                FD_SET (items_ [i].fd, &pollset_out);
            if (items_ [i].events & ZMQ_POLLERR)
                FD_SET (items_ [i].fd, &pollset_err);
            if (maxfd < items_ [i].fd)
                maxfd = items_ [i].fd;
        }
    }

    bool first_pass = true;
    int nevents = 0;
    fd_set inset, outset, errset;

    while (true) {

        //  Compute the timeout for the subsequent poll.
        timeval timeout;
        timeval *ptimeout;
        if (first_pass) {
            timeout.tv_sec = 0;
            timeout.tv_usec = 0;
            ptimeout = &timeout;
        }
        else if (timeout_ < 0)
            ptimeout = NULL;
        else {
            timeout.tv_sec = (long) ((end - now) / 1000);
            timeout.tv_usec = (long) ((end - now) % 1000 * 1000);
            ptimeout = &timeout;
        }

        //  Wait for events. Ignore interrupts if there's infinite timeout.
        while (true) {
            memcpy (&inset, &pollset_in, sizeof (fd_set));
            memcpy (&outset, &pollset_out, sizeof (fd_set));
            memcpy (&errset, &pollset_err, sizeof (fd_set));
#if defined ZMQ_HAVE_WINDOWS
            int rc = select (0, &inset, &outset, &errset, ptimeout);
            if (unlikely (rc == SOCKET_ERROR)) {
                errno = zmq::wsa_error_to_errno (WSAGetLastError ());
                wsa_assert (errno == ENOTSOCK);
                return -1;
            }
#else
            int rc = select (maxfd + 1, &inset, &outset, &errset, ptimeout);
            if (unlikely (rc == -1)) {
                errno_assert (errno == EINTR || errno == EBADF);
                return -1;
            }
#endif
            break;
        }

        //  Check for the events.
        for (int i = 0; i != nitems_; i++) {

            items_ [i].revents = 0;

            //  The poll item is a 0MQ socket. Retrieve pending events
            //  using the ZMQ_EVENTS socket option.
            if (items_ [i].socket) {
                size_t zmq_events_size = sizeof (uint32_t);
                uint32_t zmq_events;
                if (zmq_getsockopt (items_ [i].socket, ZMQ_EVENTS, &zmq_events,
                      &zmq_events_size) == -1)
                    return -1;
                if ((items_ [i].events & ZMQ_POLLOUT) &&
                      (zmq_events & ZMQ_POLLOUT))
                    items_ [i].revents |= ZMQ_POLLOUT;
                if ((items_ [i].events & ZMQ_POLLIN) &&
                      (zmq_events & ZMQ_POLLIN))
                    items_ [i].revents |= ZMQ_POLLIN;
            }
            //  Else, the poll item is a raw file descriptor, simply convert
            //  the events to zmq_pollitem_t-style format.
            else {
                if (FD_ISSET (items_ [i].fd, &inset))
                    items_ [i].revents |= ZMQ_POLLIN;
                if (FD_ISSET (items_ [i].fd, &outset))
                    items_ [i].revents |= ZMQ_POLLOUT;
                if (FD_ISSET (items_ [i].fd, &errset))
                    items_ [i].revents |= ZMQ_POLLERR;
            }

            if (items_ [i].revents)
                nevents++;
        }

        //  If timeout is zero, exit immediately whether there are events or not.
        if (timeout_ == 0)
            break;

        //  If there are events to return, we can exit immediately.
        if (nevents)
            break;

        //  At this point we are meant to wait for events but there are none.
        //  If timeout is infinite we can just loop until we get some events.
        if (timeout_ < 0) {
            if (first_pass)
                first_pass = false;
            continue;
        }

        //  The timeout is finite and there are no events. In the first pass
        //  we take a timestamp of when the polling began (we assume that the
        //  first pass took negligible time). We also compute the time
        //  when the polling should time out.
        if (first_pass) {
            now = clock.now_ms ();
            end = now + timeout_;
            if (now == end)
                break;
            first_pass = false;
            continue;
        }

        //  Find out whether the timeout has expired.
        now = clock.now_ms ();
        if (now >= end)
            break;
    }

    return nevents;

#else
    //  Exotic platforms that support neither poll() nor select().
    errno = ENOTSUP;
    return -1;
#endif
}
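For reference, a minimal caller-side sketch of the public interface this function implements, mixing a 0MQ socket and a raw file descriptor ('sub' and 'fd' are assumed to exist in the caller):

//  Sketch: polling a 0MQ socket and a raw descriptor through zmq_poll ().
zmq_pollitem_t items [2];

items [0].socket = sub;               //  0MQ socket, polled via its ZMQ_FD
items [0].fd     = 0;
items [0].events = ZMQ_POLLIN;

items [1].socket = NULL;              //  raw file descriptor path
items [1].fd     = fd;
items [1].events = ZMQ_POLLIN | ZMQ_POLLOUT;

int rc = zmq_poll (items, 2, 1000);   //  wait up to 1000 ms
if (rc > 0) {
    if (items [0].revents & ZMQ_POLLIN) {
        //  a message can be received from 'sub' without blocking
    }
    if (items [1].revents & ZMQ_POLLOUT) {
        //  'fd' is writable
    }
}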
Example #6
0
int tls_do_handshake(rdpTls* tls, BOOL clientMode)
{
	CryptoCert cert;
	int verify_status, status;

	do
	{
#ifdef HAVE_POLL_H
		struct pollfd pollfds;
#else
		struct timeval tv;
		fd_set rset;
#endif
		int fd;

		status = BIO_do_handshake(tls->bio);

		if (status == 1)
			break;

		if (!BIO_should_retry(tls->bio))
			return -1;

		/* we select() only for read even though we should test both read and
		 * write depending on which direction actually blocked */
		fd = BIO_get_fd(tls->bio, NULL);

		if (fd < 0)
		{
			WLog_ERR(TAG,  "unable to retrieve BIO fd");
			return -1;
		}

#ifdef HAVE_POLL_H
		pollfds.fd = fd;
		pollfds.events = POLLIN;
		pollfds.revents = 0;

		do
		{
			status = poll(&pollfds, 1, 10 * 1000);
		}
		while ((status < 0) && (errno == EINTR));
#else
		FD_ZERO(&rset);
		FD_SET(fd, &rset);
		tv.tv_sec = 0;
		tv.tv_usec = 10 * 1000; /* 10ms */

		status = _select(fd + 1, &rset, NULL, NULL, &tv);
#endif
		if (status < 0)
		{
			WLog_ERR(TAG,  "error during select()");
			return -1;
		}
	}
	while (TRUE);

	cert = tls_get_certificate(tls, clientMode);
	if (!cert)
	{
		WLog_ERR(TAG,  "tls_get_certificate failed to return the server certificate.");
		return -1;
	}

	tls->Bindings = tls_get_channel_bindings(cert->px509);
	if (!tls->Bindings)
	{
		WLog_ERR(TAG,  "unable to retrieve bindings");
		verify_status = -1;
		goto out;
	}

	if (!crypto_cert_get_public_key(cert, &tls->PublicKey, &tls->PublicKeyLength))
	{
		WLog_ERR(TAG,  "crypto_cert_get_public_key failed to return the server public key.");
		verify_status = -1;
		goto out;
	}

	/* Note: server-side NLA needs public keys (keys from us, the server) but no
	 * 		certificate verify
	 */
	verify_status = 1;
	if (clientMode)
	{
		verify_status = tls_verify_certificate(tls, cert, tls->hostname, tls->port);

		if (verify_status < 1)
		{
			WLog_ERR(TAG,  "certificate not trusted, aborting.");
			tls_disconnect(tls);
			verify_status = 0;
		}
	}

out:
	tls_free_certificate(cert);

	return verify_status;
}
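The comment inside the loop admits it polls only for read even though the handshake may have blocked on a write. OpenSSL exposes BIO_should_read() and BIO_should_write() to distinguish the two; a sketch of choosing the poll events accordingly (reusing the same tls->bio, fd and pollfds as above):

		/* Sketch: poll for the direction the BIO actually blocked on. */
		short events = 0;

		if (BIO_should_read(tls->bio))
			events |= POLLIN;
		if (BIO_should_write(tls->bio))
			events |= POLLOUT;
		if (events == 0)
			events = POLLIN;	/* fall back to the original behaviour */

		pollfds.fd      = fd;
		pollfds.events  = events;
		pollfds.revents = 0;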
Example #7
0
void
lgssd_run()
{
	int			ret;
	struct sigaction	dn_act;
	int			fd;
	time_t			child_check = 0;
	pid_t			child_pid;

	/* Taken from linux/Documentation/dnotify.txt: */
	dn_act.sa_sigaction = dir_notify_handler;
	sigemptyset(&dn_act.sa_mask);
	dn_act.sa_flags = SA_SIGINFO;
	sigaction(DNOTIFY_SIGNAL, &dn_act, NULL);

	if ((fd = open(pipefs_dir, O_RDONLY)) == -1) {
		printerr(0, "ERROR: failed to open %s: %s\n",
			 pipefs_dir, strerror(errno));
		return;
	}
	fcntl(fd, F_SETSIG, DNOTIFY_SIGNAL);
	fcntl(fd, F_NOTIFY, DN_CREATE|DN_DELETE|DN_MODIFY|DN_MULTISHOT);

	init_client_list();

	while (1) {
		while (dir_changed) {
			dir_changed = 0;
			printerr(2, "pipefs root dir changed\n");
			if (update_client_list()) {
				printerr(0, "ERROR: couldn't update "
					 "client list\n");
				goto out;
			}
		}

		/* every 5s cleanup possible zombies of child processes */
		if (time(NULL) - child_check >= 5) {
			printerr(3, "check zombie children...\n");

			while (1) {
				child_pid = waitpid(-1, NULL, WNOHANG);
				if (child_pid <= 0)
					break;

				printerr(2, "terminate zombie child: %d\n",
					 child_pid);
			}

			child_check = time(NULL);
		}

		/* race condition here: dir_changed could be set before we
		 * enter the poll, and we'd never notice if it weren't for the
		 * timeout. */
		ret = poll(pollarray, pollsize, POLL_MILLISECS);
		if (ret < 0) {
			if (errno != EINTR)
				printerr(0,
					 "WARNING: error return from poll\n");
		} else if (ret == 0) {
			/* timeout */
		} else { /* ret > 0 */
			scan_poll_results(ret);
		}
	}
out:
	close(fd);
	return;
}
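The comment before the poll() call describes a race: dir_changed can be set after it is tested but before poll() starts, and only the timeout saves the loop. On Linux the usual way to close that window is ppoll(), which installs a signal mask atomically for the duration of the call. A sketch under that assumption (DNOTIFY_SIGNAL stays blocked everywhere except inside ppoll()):

	/* Sketch: close the dir_changed race with ppoll(). */
	sigset_t newmask, origmask;

	sigemptyset(&newmask);
	sigaddset(&newmask, DNOTIFY_SIGNAL);
	sigprocmask(SIG_BLOCK, &newmask, &origmask);	/* before entering the loop */

	/* ... inside the loop, replacing poll(pollarray, pollsize, POLL_MILLISECS): */
	struct timespec ts = { POLL_MILLISECS / 1000,
			       (POLL_MILLISECS % 1000) * 1000000L };
	ret = ppoll(pollarray, pollsize, &ts, &origmask);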
Example #8
int ev_get(int timeout_ms)
{
    int r, i;

	r = poll(ev_fds, ev_count, timeout_ms);
	if (r <= 0)
		return -1;

	for (i=0;i<ev_count;i++) {
		if ((ev_fds[i].revents & POLLIN) == 0)
			continue;

		if (ev_type[i] == EV_TYPE_KEYBOARD) {
			struct input_event ev;
			r = read(ev_fds[i].fd, &ev, sizeof(ev));
			fprintf(stderr, "keyboard event: (%x,%x,%x)\n", ev.type, ev.code, ev.value);
			if(r == sizeof(ev)) {

				/* POWER key */
				if ((ev.type == EV_KEY) && (ev.code == EV_POWER_KEY_CODE) && (ev.value == EV_KEY_VALUE_DOWN))
					return EVENT_POWER_KEY_DOWN;
				if ((ev.type == EV_KEY) && (ev.code == EV_POWER_KEY_CODE) && (ev.value == EV_KEY_VALUE_UP))
					return EVENT_POWER_KEY_UP;

				/* VOLUMEDOWN key */
				if ((ev.type == EV_KEY) && (ev.code == EV_VOLUMEDOWN_KEY_CODE) && (ev.value == EV_KEY_VALUE_DOWN))
					return EVENT_VOLUMEDOWN_KEY_DOWN;
				if ((ev.type == EV_KEY) && (ev.code == EV_VOLUMEDOWN_KEY_CODE) && (ev.value == EV_KEY_VALUE_UP))
					return EVENT_VOLUMEDOWN_KEY_UP;

				/* VOLUMEUP key */
				if ((ev.type == EV_KEY) && (ev.code == EV_VOLUMEUP_KEY_CODE) && (ev.value == EV_KEY_VALUE_DOWN))
					return EVENT_VOLUMEUP_KEY_DOWN;
				if ((ev.type == EV_KEY) && (ev.code == EV_VOLUMEUP_KEY_CODE) && (ev.value == EV_KEY_VALUE_UP))
					return EVENT_VOLUMEUP_KEY_UP;

				/* CAMERA key */
				if ((ev.type == EV_KEY) && (ev.code == EV_CAMERA_KEY_CODE) && (ev.value == EV_KEY_VALUE_DOWN))
					return EVENT_CAMERA_KEY_DOWN;
				if ((ev.type == EV_KEY) && (ev.code == EV_CAMERA_KEY_CODE) && (ev.value == EV_KEY_VALUE_UP))
					return EVENT_CAMERA_KEY_UP;

				return -1;
			}
		} else if (ev_type[i] == EV_TYPE_UEVENT) {

			char msg[1024];
			msg[0] = '\0';
			while ((r = recv(ev_fds[i].fd, msg, sizeof(msg) - 1, 0)) > 0)
				msg[r] = '\0';	/* NUL-terminate the last datagram for strstr() */
			if(strstr(msg, CHARGER_DRIVER))
			{
				ALOGD("pm8921_battery UEVENT msg : %s\n", msg);
				return EVENT_BATTERY;

			}

		}
	}

    return -1;
}
Example #9
0
File: select.c Project: kevinw/curl
/*
 * This is an internal function used for waiting for read or write
 * events on a pair of file descriptors.  It uses poll() when a fine
 * poll() is available, in order to avoid limits with FD_SETSIZE,
 * otherwise select() is used.  An error is returned if select() is
 * being used and a file descriptor is too large for FD_SETSIZE.
 * A negative timeout value makes this function wait indefinitely,
 * unless no valid file descriptor is given; in that case the
 * negative timeout is ignored and the function times out immediately.
 * When compiled with CURL_ACKNOWLEDGE_EINTR defined, EINTR condition
 * is honored and function might exit early without awaiting timeout,
 * otherwise EINTR will be ignored.
 *
 * Return values:
 *   -1 = system call error or fd >= FD_SETSIZE
 *    0 = timeout
 *    CURL_CSELECT_IN | CURL_CSELECT_OUT | CURL_CSELECT_ERR
 */
int Curl_socket_ready(curl_socket_t readfd, curl_socket_t writefd,
                      int timeout_ms)
{
#ifdef HAVE_POLL_FINE
  struct pollfd pfd[2];
  int num;
#else
  struct timeval pending_tv;
  struct timeval *ptimeout;
  fd_set fds_read;
  fd_set fds_write;
  fd_set fds_err;
  curl_socket_t maxfd;
#endif
  struct timeval initial_tv = {0,0};
  int pending_ms = 0;
  int error;
  int r;
  int ret;

  if((readfd == CURL_SOCKET_BAD) && (writefd == CURL_SOCKET_BAD)) {
    r = wait_ms(timeout_ms);
    return r;
  }

  /* Avoid initial timestamp, avoid gettimeofday() call, when elapsed
     time in this function does not need to be measured. This happens
     when function is called with a zero timeout or a negative timeout
     value indicating a blocking call should be performed. */

  if(timeout_ms > 0) {
    pending_ms = timeout_ms;
    initial_tv = curlx_tvnow();
  }

#ifdef HAVE_POLL_FINE

  num = 0;
  if(readfd != CURL_SOCKET_BAD) {
    pfd[num].fd = readfd;
    pfd[num].events = POLLRDNORM|POLLIN|POLLRDBAND|POLLPRI;
    pfd[num].revents = 0;
    num++;
  }
  if(writefd != CURL_SOCKET_BAD) {
    pfd[num].fd = writefd;
    pfd[num].events = POLLWRNORM|POLLOUT;
    pfd[num].revents = 0;
    num++;
  }

  do {
    if(timeout_ms < 0)
      pending_ms = -1;
    else if(!timeout_ms)
      pending_ms = 0;
    r = poll(pfd, num, pending_ms);
    if(r != -1)
      break;
    error = SOCKERRNO;
    if(error && error_not_EINTR)
      break;
    if(timeout_ms > 0) {
      pending_ms = timeout_ms - elapsed_ms;
      if(pending_ms <= 0)
        break;
    }
  } while(r == -1);

  if(r < 0)
    return -1;
  if(r == 0)
    return 0;

  ret = 0;
  num = 0;
  if(readfd != CURL_SOCKET_BAD) {
    if(pfd[num].revents & (POLLRDNORM|POLLIN|POLLERR|POLLHUP))
      ret |= CURL_CSELECT_IN;
    if(pfd[num].revents & (POLLRDBAND|POLLPRI|POLLNVAL))
      ret |= CURL_CSELECT_ERR;
    num++;
  }
  if(writefd != CURL_SOCKET_BAD) {
    if(pfd[num].revents & (POLLWRNORM|POLLOUT))
      ret |= CURL_CSELECT_OUT;
    if(pfd[num].revents & (POLLERR|POLLHUP|POLLNVAL))
      ret |= CURL_CSELECT_ERR;
  }

  return ret;

#else  /* HAVE_POLL_FINE */

  FD_ZERO(&fds_err);
  maxfd = (curl_socket_t)-1;

  FD_ZERO(&fds_read);
  if(readfd != CURL_SOCKET_BAD) {
    VERIFY_SOCK(readfd);
    FD_SET(readfd, &fds_read);
    FD_SET(readfd, &fds_err);
    maxfd = readfd;
  }

  FD_ZERO(&fds_write);
  if(writefd != CURL_SOCKET_BAD) {
    VERIFY_SOCK(writefd);
    FD_SET(writefd, &fds_write);
    FD_SET(writefd, &fds_err);
    if(writefd > maxfd)
      maxfd = writefd;
  }

  ptimeout = (timeout_ms < 0) ? NULL : &pending_tv;

  do {
    if(timeout_ms > 0) {
      pending_tv.tv_sec = pending_ms / 1000;
      pending_tv.tv_usec = (pending_ms % 1000) * 1000;
    }
    else if(!timeout_ms) {
      pending_tv.tv_sec = 0;
      pending_tv.tv_usec = 0;
    }
    r = select((int)maxfd + 1, &fds_read, &fds_write, &fds_err, ptimeout);
    if(r != -1)
      break;
    error = SOCKERRNO;
    if(error && error_not_EINTR)
      break;
    if(timeout_ms > 0) {
      pending_ms = timeout_ms - elapsed_ms;
      if(pending_ms <= 0)
        break;
    }
  } while(r == -1);

  if(r < 0)
    return -1;
  if(r == 0)
    return 0;

  ret = 0;
  if(readfd != CURL_SOCKET_BAD) {
    if(FD_ISSET(readfd, &fds_read))
      ret |= CURL_CSELECT_IN;
    if(FD_ISSET(readfd, &fds_err))
      ret |= CURL_CSELECT_ERR;
  }
  if(writefd != CURL_SOCKET_BAD) {
    if(FD_ISSET(writefd, &fds_write))
      ret |= CURL_CSELECT_OUT;
    if(FD_ISSET(writefd, &fds_err))
      ret |= CURL_CSELECT_ERR;
  }

  return ret;

#endif  /* HAVE_POLL_FINE */

}
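The header comment documents the return value as a bitmask; a minimal caller sketch ('sockfd' is an assumed connected socket, not something from the file above):

  /* Sketch: interpreting Curl_socket_ready()'s return value. */
  int ev = Curl_socket_ready(sockfd, CURL_SOCKET_BAD, 1000);

  if(ev == -1) {
    /* system call error, or a descriptor too large for FD_SETSIZE */
  }
  else if(ev == 0) {
    /* 1000 ms elapsed without activity */
  }
  else {
    if(ev & CURL_CSELECT_IN)
      ; /* readable */
    if(ev & CURL_CSELECT_ERR)
      ; /* exceptional condition */
  }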
Example #10
0
				SE(WindowHandle _handle)
					: m_ev(poll(_handle) )
				{
				}
Example #11
0
			struct SE { const Event* m_ev; SE() : m_ev(poll() ) {} ~SE() { if (NULL != m_ev) { release(m_ev); } } } scopeEvent;
Example #12
0
int kdbus_test_chat(struct kdbus_test_env *env)
{
	int ret, cookie;
	struct kdbus_conn *conn_a, *conn_b;
	struct pollfd fds[2];
	uint64_t flags;
	int count;

	conn_a = kdbus_hello(env->buspath, 0, NULL, 0);
	conn_b = kdbus_hello(env->buspath, 0, NULL, 0);
	ASSERT_RETURN(conn_a && conn_b);

	flags = KDBUS_NAME_ALLOW_REPLACEMENT;
	ret = kdbus_name_acquire(conn_a, "foo.bar.test", &flags);
	ASSERT_RETURN(ret == 0);

	ret = kdbus_name_acquire(conn_a, "foo.bar.baz", NULL);
	ASSERT_RETURN(ret == 0);

	flags = KDBUS_NAME_QUEUE;
	ret = kdbus_name_acquire(conn_b, "foo.bar.baz", &flags);
	ASSERT_RETURN(ret == 0);

	ret = kdbus_name_acquire(conn_a, "foo.bar.double", NULL);
	ASSERT_RETURN(ret == 0);

	ret = kdbus_name_acquire(conn_a, "foo.bar.double", NULL);
	ASSERT_RETURN(ret == -EALREADY);

	ret = kdbus_name_release(conn_a, "foo.bar.double");
	ASSERT_RETURN(ret == 0);

	ret = kdbus_name_release(conn_a, "foo.bar.double");
	ASSERT_RETURN(ret == -ESRCH);

	ret = kdbus_list(conn_b, KDBUS_LIST_UNIQUE |
				 KDBUS_LIST_NAMES  |
				 KDBUS_LIST_QUEUED |
				 KDBUS_LIST_ACTIVATORS);
	ASSERT_RETURN(ret == 0);

	ret = kdbus_add_match_empty(conn_a);
	ASSERT_RETURN(ret == 0);

	ret = kdbus_add_match_empty(conn_b);
	ASSERT_RETURN(ret == 0);

	cookie = 0;
	ret = kdbus_msg_send(conn_b, NULL, 0xc0000000 | cookie, 0, 0, 0,
			     KDBUS_DST_ID_BROADCAST);
	ASSERT_RETURN(ret == 0);

	fds[0].fd = conn_a->fd;
	fds[1].fd = conn_b->fd;

	kdbus_printf("-- entering poll loop ...\n");

	for (count = 0;; count++) {
		int i, nfds = sizeof(fds) / sizeof(fds[0]);

		for (i = 0; i < nfds; i++) {
			fds[i].events = POLLIN | POLLPRI | POLLHUP;
			fds[i].revents = 0;
		}

		ret = poll(fds, nfds, 3000);
		ASSERT_RETURN(ret >= 0);

		if (fds[0].revents & POLLIN) {
			if (count > 2)
				kdbus_name_release(conn_a, "foo.bar.baz");

			ret = kdbus_msg_recv(conn_a, NULL, NULL);
			ASSERT_RETURN(ret == 0);
			ret = kdbus_msg_send(conn_a, NULL,
					     0xc0000000 | cookie++,
					     0, 0, 0, conn_b->id);
			ASSERT_RETURN(ret == 0);
		}

		if (fds[1].revents & POLLIN) {
			ret = kdbus_msg_recv(conn_b, NULL, NULL);
			ASSERT_RETURN(ret == 0);
			ret = kdbus_msg_send(conn_b, NULL,
					     0xc0000000 | cookie++,
					     0, 0, 0, conn_a->id);
			ASSERT_RETURN(ret == 0);
		}

		ret = kdbus_list(conn_b, KDBUS_LIST_UNIQUE |
					 KDBUS_LIST_NAMES  |
					 KDBUS_LIST_QUEUED |
					 KDBUS_LIST_ACTIVATORS);
		ASSERT_RETURN(ret == 0);

		if (count > 10)
			break;
	}

	kdbus_printf("-- closing bus connections\n");
	kdbus_conn_free(conn_a);
	kdbus_conn_free(conn_b);

	return TEST_OK;
}
Example #13
0
/**
 * Perform some basic functional tests on the driver;
 * make sure we can collect data from the sensor in polled
 * and automatic modes.
 */
void
test()
{
	struct range_finder_report report;
	ssize_t sz;
	int ret;

	int fd = open(MB12XX_DEVICE_PATH, O_RDONLY);

	if (fd < 0) {
		err(1, "%s open failed (try 'mb12xx start' if the driver is not running", MB12XX_DEVICE_PATH);
	}

	/* do a simple demand read */
	sz = read(fd, &report, sizeof(report));

	if (sz != sizeof(report)) {
		err(1, "immediate read failed");
	}

	warnx("single read");
	warnx("measurement: %0.2f m", (double)report.distance);
	warnx("time:        %lld", report.timestamp);

	/* start the sensor polling at 2Hz */
	if (OK != ioctl(fd, SENSORIOCSPOLLRATE, 2)) {
		errx(1, "failed to set 2Hz poll rate");
	}

	/* read the sensor 5x and report each value */
	for (unsigned i = 0; i < 5; i++) {
		struct pollfd fds;

		/* wait for data to be ready */
		fds.fd = fd;
		fds.events = POLLIN;
		ret = poll(&fds, 1, 2000);

		if (ret != 1) {
			errx(1, "timed out waiting for sensor data");
		}

		/* now go get it */
		sz = read(fd, &report, sizeof(report));

		if (sz != sizeof(report)) {
			err(1, "periodic read failed");
		}

		warnx("periodic read %u", i);
		warnx("measurement: %0.3f", (double)report.distance);
		warnx("time:        %lld", report.timestamp);
	}

	/* reset the sensor polling to default rate */
	if (OK != ioctl(fd, SENSORIOCSPOLLRATE, SENSOR_POLLRATE_DEFAULT)) {
		errx(1, "failed to set default poll rate");
	}

	errx(0, "PASS");
}
Example #14
0
static int
wait_for_mysql(MYSQL *mysql, int status)
{
#ifdef __WIN__
  fd_set rs, ws, es;
  int res;
  struct timeval tv, *timeout;
  my_socket s= mysql_get_socket(mysql);
  FD_ZERO(&rs);
  FD_ZERO(&ws);
  FD_ZERO(&es);
  if (status & MYSQL_WAIT_READ)
    FD_SET(s, &rs);
  if (status & MYSQL_WAIT_WRITE)
    FD_SET(s, &ws);
  if (status & MYSQL_WAIT_EXCEPT)
    FD_SET(s, &es);
  if (status & MYSQL_WAIT_TIMEOUT)
  {
    tv.tv_sec= mysql_get_timeout_value(mysql);
    tv.tv_usec= 0;
    timeout= &tv;
  }
  else
    timeout= NULL;
  res= select(1, &rs, &ws, &es, timeout);
  if (res == 0)
    return MYSQL_WAIT_TIMEOUT;
  else if (res == SOCKET_ERROR)
  {
    /*
      In a real event framework, we should handle errors and re-try the select.
    */
    return MYSQL_WAIT_TIMEOUT;
  }
  else
  {
    int status= 0;
    if (FD_ISSET(s, &rs))
      status|= MYSQL_WAIT_READ;
    if (FD_ISSET(s, &ws))
      status|= MYSQL_WAIT_WRITE;
    if (FD_ISSET(s, &es))
      status|= MYSQL_WAIT_EXCEPT;
    return status;
  }
#else
  struct pollfd pfd;
  int timeout;
  int res;

  pfd.fd= mysql_get_socket(mysql);
  pfd.events=
    (status & MYSQL_WAIT_READ ? POLLIN : 0) |
    (status & MYSQL_WAIT_WRITE ? POLLOUT : 0) |
    (status & MYSQL_WAIT_EXCEPT ? POLLPRI : 0);
  if (status & MYSQL_WAIT_TIMEOUT)
    timeout= 1000*mysql_get_timeout_value(mysql);
  else
    timeout= -1;
  res= poll(&pfd, 1, timeout);
  if (res == 0)
    return MYSQL_WAIT_TIMEOUT;
  else if (res < 0)
  {
    /*
      In a real event framework, we should handle EINTR and re-try the poll.
    */
    return MYSQL_WAIT_TIMEOUT;
  }
  else
  {
    int status= 0;
    if (pfd.revents & POLLIN)
      status|= MYSQL_WAIT_READ;
    if (pfd.revents & POLLOUT)
      status|= MYSQL_WAIT_WRITE;
    if (pfd.revents & POLLPRI)
      status|= MYSQL_WAIT_EXCEPT;
    return status;
  }
#endif
}
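Both branches carry a comment saying a real event framework should retry after an interrupted wait rather than report a timeout. A minimal EINTR-retry sketch for the poll() branch; note that retrying with the full timeout over-waits, so production code would recompute the remaining time (as the curl examples above do with pending_ms):

  /* Sketch: EINTR-safe retry around poll(). */
  do
    res= poll(&pfd, 1, timeout);
  while (res < 0 && errno == EINTR);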
Example #15
0
File: select.c Project: kevinw/curl
/*
 * This is a wrapper around poll().  If poll() does not exist, then
 * select() is used instead.  An error is returned if select() is
 * being used and a file descriptor is too large for FD_SETSIZE.
 * A negative timeout value makes this function wait indefinitely,
 * unless no valid file descriptor is given; in that case the
 * negative timeout is ignored and the function times out immediately.
 * When compiled with CURL_ACKNOWLEDGE_EINTR defined, EINTR condition
 * is honored and function might exit early without awaiting timeout,
 * otherwise EINTR will be ignored.
 *
 * Return values:
 *   -1 = system call error or fd >= FD_SETSIZE
 *    0 = timeout
 *    N = number of structures with non zero revent fields
 */
int Curl_poll(struct pollfd ufds[], unsigned int nfds, int timeout_ms)
{
#ifndef HAVE_POLL_FINE
  struct timeval pending_tv;
  struct timeval *ptimeout;
  fd_set fds_read;
  fd_set fds_write;
  fd_set fds_err;
  curl_socket_t maxfd;
#endif
  struct timeval initial_tv = {0,0};
  bool fds_none = TRUE;
  unsigned int i;
  int pending_ms = 0;
  int error;
  int r;

  if(ufds) {
    for (i = 0; i < nfds; i++) {
      if(ufds[i].fd != CURL_SOCKET_BAD) {
        fds_none = FALSE;
        break;
      }
    }
  }
  if(fds_none) {
    r = wait_ms(timeout_ms);
    return r;
  }

  /* Avoid initial timestamp, avoid gettimeofday() call, when elapsed
     time in this function does not need to be measured. This happens
     when function is called with a zero timeout or a negative timeout
     value indicating a blocking call should be performed. */

  if(timeout_ms > 0) {
    pending_ms = timeout_ms;
    initial_tv = curlx_tvnow();
  }

#ifdef HAVE_POLL_FINE

  do {
    if(timeout_ms < 0)
      pending_ms = -1;
    else if(!timeout_ms)
      pending_ms = 0;
    r = poll(ufds, nfds, pending_ms);
    if(r != -1)
      break;
    error = SOCKERRNO;
    if(error && error_not_EINTR)
      break;
    if(timeout_ms > 0) {
      pending_ms = timeout_ms - elapsed_ms;
      if(pending_ms <= 0)
        break;
    }
  } while(r == -1);

#else  /* HAVE_POLL_FINE */

  FD_ZERO(&fds_read);
  FD_ZERO(&fds_write);
  FD_ZERO(&fds_err);
  maxfd = (curl_socket_t)-1;

  for (i = 0; i < nfds; i++) {
    ufds[i].revents = 0;
    if(ufds[i].fd == CURL_SOCKET_BAD)
      continue;
    VERIFY_SOCK(ufds[i].fd);
    if(ufds[i].events & (POLLIN|POLLOUT|POLLPRI|
                          POLLRDNORM|POLLWRNORM|POLLRDBAND)) {
      if(ufds[i].fd > maxfd)
        maxfd = ufds[i].fd;
      if(ufds[i].events & (POLLRDNORM|POLLIN))
        FD_SET(ufds[i].fd, &fds_read);
      if(ufds[i].events & (POLLWRNORM|POLLOUT))
        FD_SET(ufds[i].fd, &fds_write);
      if(ufds[i].events & (POLLRDBAND|POLLPRI))
        FD_SET(ufds[i].fd, &fds_err);
    }
  }

  ptimeout = (timeout_ms < 0) ? NULL : &pending_tv;

  do {
    if(timeout_ms > 0) {
      pending_tv.tv_sec = pending_ms / 1000;
      pending_tv.tv_usec = (pending_ms % 1000) * 1000;
    }
    else if(!timeout_ms) {
      pending_tv.tv_sec = 0;
      pending_tv.tv_usec = 0;
    }
    r = select((int)maxfd + 1, &fds_read, &fds_write, &fds_err, ptimeout);
    if(r != -1)
      break;
    error = SOCKERRNO;
    if(error && error_not_EINTR)
      break;
    if(timeout_ms > 0) {
      pending_ms = timeout_ms - elapsed_ms;
      if(pending_ms <= 0)
        break;
    }
  } while(r == -1);

  if(r < 0)
    return -1;
  if(r == 0)
    return 0;

  r = 0;
  for (i = 0; i < nfds; i++) {
    ufds[i].revents = 0;
    if(ufds[i].fd == CURL_SOCKET_BAD)
      continue;
    if(FD_ISSET(ufds[i].fd, &fds_read))
      ufds[i].revents |= POLLIN;
    if(FD_ISSET(ufds[i].fd, &fds_write))
      ufds[i].revents |= POLLOUT;
    if(FD_ISSET(ufds[i].fd, &fds_err))
      ufds[i].revents |= POLLPRI;
    if(ufds[i].revents != 0)
      r++;
  }

#endif  /* HAVE_POLL_FINE */

  return r;
}
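A minimal caller sketch for Curl_poll(): it takes an ordinary pollfd array and skips entries whose fd is CURL_SOCKET_BAD ('sockfd' is an assumed socket, not something defined above):

  /* Sketch: calling Curl_poll() with one live and one ignored slot. */
  struct pollfd ufds[2];

  ufds[0].fd      = sockfd;
  ufds[0].events  = POLLIN;
  ufds[0].revents = 0;

  ufds[1].fd      = CURL_SOCKET_BAD;  /* skipped by Curl_poll() */
  ufds[1].events  = 0;
  ufds[1].revents = 0;

  if(Curl_poll(ufds, 2, 500) > 0 && (ufds[0].revents & POLLIN)) {
    /* data available on sockfd */
  }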
Example #16
static enum req_fsm_nxt
http1_wait(struct sess *sp, struct worker *wrk, struct req *req)
{
	int j, tmo;
	struct pollfd pfd[1];
	double now, when;
	enum sess_close why = SC_NULL;
	enum http1_status_e hs;

	CHECK_OBJ_NOTNULL(sp, SESS_MAGIC);
	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	CHECK_OBJ_NOTNULL(req, REQ_MAGIC);

	assert(req->sp == sp);

	AZ(req->vcl);
	AZ(req->esi_level);
	AZ(isnan(sp->t_idle));
	assert(isnan(req->t_first));
	assert(isnan(req->t_prev));
	assert(isnan(req->t_req));

	tmo = (int)(1e3 * cache_param->timeout_linger);
	while (1) {
		pfd[0].fd = sp->fd;
		pfd[0].events = POLLIN;
		pfd[0].revents = 0;
		j = poll(pfd, 1, tmo);
		assert(j >= 0);
		now = VTIM_real();
		if (j != 0)
			hs = HTTP1_Rx(req->htc);
		else
			hs = HTTP1_Complete(req->htc);
		if (hs == HTTP1_COMPLETE) {
			/* Got it, run with it */
			if (isnan(req->t_first))
				req->t_first = now;
			if (isnan(req->t_req))
				req->t_req = now;
			req->acct.req_hdrbytes += Tlen(req->htc->rxbuf);
			return (REQ_FSM_MORE);
		} else if (hs == HTTP1_ERROR_EOF) {
			why = SC_REM_CLOSE;
			break;
		} else if (hs == HTTP1_OVERFLOW) {
			why = SC_RX_OVERFLOW;
			break;
		} else if (hs == HTTP1_ALL_WHITESPACE) {
			/* Nothing but whitespace */
			when = sp->t_idle + cache_param->timeout_idle;
			if (when < now) {
				why = SC_RX_TIMEOUT;
				break;
			}
			when = sp->t_idle + cache_param->timeout_linger;
			tmo = (int)(1e3 * (when - now));
			if (when < now || tmo == 0) {
				wrk->stats->sess_herd++;
				SES_ReleaseReq(req);
				WAIT_Enter(sp);
				return (REQ_FSM_DONE);
			}
		} else {
			/* Working on it */
			if (isnan(req->t_first))
				/* Record first byte received time stamp */
				req->t_first = now;
			when = sp->t_idle + cache_param->timeout_req;
			tmo = (int)(1e3 * (when - now));
			if (when < now || tmo == 0) {
				why = SC_RX_TIMEOUT;
				break;
			}
		}
	}
	req->acct.req_hdrbytes += Tlen(req->htc->rxbuf);
	CNT_AcctLogCharge(wrk->stats, req);
	SES_ReleaseReq(req);
	assert(why != SC_NULL);
	SES_Delete(sp, why, now);
	return (REQ_FSM_DONE);
}
Example #17
0
/**
  Poll the socket for more work

  @retval  EFI_SUCCESS      The application is running normally
  @retval  EFI_NOT_STARTED  Listen socket error
  @retval  Other            The user stopped the application
**/
EFI_STATUS
SocketPoll (
  )
{
  BOOLEAN bRemoveSocket;
  BOOLEAN bListenError;
  size_t BytesReceived;
  int CloseStatus;
  nfds_t Entry;
  INTN EntryPrevious;
  int FdCount;
  nfds_t Index;
  socklen_t LengthInBytes;
  struct sockaddr_in * pPortIpAddress4;
  struct sockaddr_in6 * pPortIpAddress6;
  struct sockaddr_in * pRemoteAddress4;
  struct sockaddr_in6 * pRemoteAddress6;
  struct sockaddr_in6 RemoteAddress;
  int Socket;
  EFI_STATUS Status;
  EFI_TPL TplPrevious;

  //
  //  Check for control-C
  //
  pRemoteAddress4 = (struct sockaddr_in *)&RemoteAddress;
  pRemoteAddress6 = (struct sockaddr_in6 *)&RemoteAddress;
  bListenError = FALSE;
  Status = ControlCCheck ( );
  if ( !EFI_ERROR ( Status )) {
    //
    //  Poll the sockets
    //
    FdCount = poll ( &PollFd[0],
                     MaxPort,
                     0 );
    if ( -1 == FdCount ) {
      //
      //  Poll error
      //
      DEBUG (( DEBUG_ERROR,
                "ERROR - Poll error, errno: %d\r\n",
                errno ));
      Status = EFI_DEVICE_ERROR;
    }
    else {
      //
      //  Process the poll output
      //
      Index = 0;
      while ( FdCount ) {
        bRemoveSocket = FALSE;

        //
        //  Account for this descriptor
        //
        pPortIpAddress4 = (struct sockaddr_in *)&Port[ Index ].IpAddress;
        pPortIpAddress6 = (struct sockaddr_in6 *)&Port[ Index ].IpAddress;
        if ( 0 != PollFd[ Index ].revents ) {
          FdCount -= 1;
        }

        //
        //  Check for a broken connection
        //
        if ( 0 != ( PollFd[ Index ].revents & POLLHUP )) {
          bRemoveSocket = TRUE;
          if ( ListenSocket == PollFd[ Index ].fd ) {
            bListenError = TRUE;
            DEBUG (( DEBUG_ERROR,
                      "ERROR - Network closed on listen socket, errno: %d\r\n",
                      errno ));
          }
          else {
            if ( AF_INET == pPortIpAddress4->sin_family ) {
              DEBUG (( DEBUG_ERROR,
                        "ERROR - Network closed on socket %d.%d.%d.%d:%d, errno: %d\r\n",
                        pPortIpAddress4->sin_addr.s_addr & 0xff,
                        ( pPortIpAddress4->sin_addr.s_addr >> 8 ) & 0xff,
                        ( pPortIpAddress4->sin_addr.s_addr >> 16 ) & 0xff,
                        ( pPortIpAddress4->sin_addr.s_addr >> 24 ) & 0xff,
                        ntohs ( pPortIpAddress4->sin_port ),
                        errno ));
            }
            else {
              DEBUG (( DEBUG_ERROR,
                        "ERROR - Network closed on socket [%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x]:%d, errno: %d\r\n",
                        pPortIpAddress6->sin6_addr.__u6_addr.__u6_addr8[ 0 ],
                        pPortIpAddress6->sin6_addr.__u6_addr.__u6_addr8[ 1 ],
                        pPortIpAddress6->sin6_addr.__u6_addr.__u6_addr8[ 2 ],
                        pPortIpAddress6->sin6_addr.__u6_addr.__u6_addr8[ 3 ],
                        pPortIpAddress6->sin6_addr.__u6_addr.__u6_addr8[ 4 ],
                        pPortIpAddress6->sin6_addr.__u6_addr.__u6_addr8[ 5 ],
                        pPortIpAddress6->sin6_addr.__u6_addr.__u6_addr8[ 6 ],
                        pPortIpAddress6->sin6_addr.__u6_addr.__u6_addr8[ 7 ],
                        pPortIpAddress6->sin6_addr.__u6_addr.__u6_addr8[ 8 ],
                        pPortIpAddress6->sin6_addr.__u6_addr.__u6_addr8[ 9 ],
                        pPortIpAddress6->sin6_addr.__u6_addr.__u6_addr8[ 10 ],
                        pPortIpAddress6->sin6_addr.__u6_addr.__u6_addr8[ 11 ],
                        pPortIpAddress6->sin6_addr.__u6_addr.__u6_addr8[ 12 ],
                        pPortIpAddress6->sin6_addr.__u6_addr.__u6_addr8[ 13 ],
                        pPortIpAddress6->sin6_addr.__u6_addr.__u6_addr8[ 14 ],
                        pPortIpAddress6->sin6_addr.__u6_addr.__u6_addr8[ 15 ],
                        ntohs ( pPortIpAddress6->sin6_port ),
                        errno ));
            }

            //
            //  Close the socket
            //
            CloseStatus = close ( PollFd[ Index ].fd );
            if ( 0 == CloseStatus ) {
              bRemoveSocket = TRUE;
              if ( AF_INET == pPortIpAddress4->sin_family ) {
                DEBUG (( DEBUG_INFO,
                          "0x%08x: Socket closed for %d.%d.%d.%d:%d\r\n",
                          PollFd[ Index ].fd,
                          pPortIpAddress4->sin_addr.s_addr & 0xff,
                          ( pPortIpAddress4->sin_addr.s_addr >> 8 ) & 0xff,
                          ( pPortIpAddress4->sin_addr.s_addr >> 16 ) & 0xff,
                          ( pPortIpAddress4->sin_addr.s_addr >> 24 ) & 0xff,
                          ntohs ( pPortIpAddress4->sin_port )));
              }
              else {
Example #18
0
static void loop (int master_fd, int ignore_eof)
{
    ssize_t n;
    char buf [BUFSIZ];
    struct pollfd fds [2];
    char control_buf [BUFSIZ];
    char data_buf [BUFSIZ];
    int flags;
    struct strbuf control;
    struct strbuf data;
    struct iocblk *ioc;
    struct termios *term;
    unsigned char msg_type;
    int i;

    fds [0].fd = STDIN_FILENO;
    fds [0].events = POLLIN;
    fds [0].revents = 0;
    fds [1].fd = master_fd;
    fds [1].events = POLLIN;
    fds [1].revents = 0;

    control.buf = control_buf;
    control.maxlen = BUFSIZ;
    data.buf = data_buf;
    data.maxlen = BUFSIZ;

    for (;;) {
        if (poll ((struct pollfd *) &fds, 2, INFTIM) == -1)
            err_msg ("poll failed");

        if (fds [0].revents & POLLIN) {
            if ((n = read (STDIN_FILENO, buf, BUFSIZ)) == -1)
                err_msg ("read from stdin failed");

            if (n == 0) {
                if (ignore_eof) {
                    fds [0].events = 0;
                    continue;
                }
                else
                    break;
            }

            if (writen (master_fd, buf, n) == -1)
                err_msg ("writen to pty master failed");
        }

        if (fds [1].revents & POLLIN) {
            flags = 0;
            if ((n = getmsg (master_fd, &control, &data, &flags)) == -1)
                err_msg ("getmsg from pty master failed");

            msg_type = control.buf [0];

            switch (msg_type) {
            case M_DATA:
                if (writen (STDOUT_FILENO, data.buf, data.len) == -1)
                    err_msg ("writen to stdout failed");
                break;

            case M_FLUSH:
                fprintf (stderr, "pckt: pty slave flushed its queues\n");
                break;

            case M_STOPI:
                fprintf (stderr, "pckt: pty slave suspended output\n");
                break;

            case M_STARTI:
                fprintf (stderr, "pckt: pty slave resumed output\n");
                break;

            case M_STOP:
                fprintf (stderr, "pckt: pty slave disabled XON/XOFF "
                         "flow control\n");
                break;

            case M_START:
                fprintf (stderr, "pckt: pty slave enabled XON/XOFF "
                         "flow control\n");
                break;

            case M_IOCTL:
                ioc = (struct iocblk *) &data.buf [0];
                switch (ioc -> ioc_cmd) {
                case TCSBRK:
                    fprintf (stderr, "pckt: pty slave sent BREAK\n");
                    goto out;

                case TCSETS:
                case TCSETSW:
                case TCSETSF:
                    fprintf (stderr, "pckt: pty slave changed "
                             "terminal attributes\n");
                    term = (struct termios *)
                           &data.buf [sizeof (struct iocblk)];
                    fprintf (stderr, "  term.c_iflag = %04x\n",
                             term -> c_iflag);
                    fprintf (stderr, "  term.c_oflag = %04x\n",
                             term -> c_oflag);
                    fprintf (stderr, "  term.c_cflag = %04x\n",
                             term -> c_cflag);
                    fprintf (stderr, "  term.c_lflag = %04x\n",
                             term -> c_lflag);
                    fprintf (stderr, "  term.c_cc = ");
                    for (i = 0; i < NCCS; i++)
                        fprintf (stderr, "%02x ", term -> c_cc [i]);
                    fprintf (stderr, "\n");
                    break;

                default:
                    fprintf (stderr, "pckt: Unrecognised ioc_cmd: "
                             "%04x\n", ioc -> ioc_cmd);
                    fprintf (stderr, "  ioc -> ioc_cmd = %04x\n",
                             ioc -> ioc_cmd);
                    fprintf (stderr, "  ioc -> ioc_id = %04x\n",
                             ioc -> ioc_id);
                    fprintf (stderr, "  ioc -> ioc_flag = %04x\n",
                             ioc -> ioc_flag);
                    fprintf (stderr, "  ioc -> ioc_count = %04x\n",
                             ioc -> ioc_count);
                    fprintf (stderr, "  ioc -> ioc_rval = %04x\n",
                             ioc -> ioc_rval);
                    fprintf (stderr, "  ioc -> ioc_error = %04x\n",
                             ioc -> ioc_error);
                    break;
                }
                break;

            default:
                fprintf (stderr, "pckt: Unrecognised message type: "
                         "%02x\n", msg_type);
                break;
            }
        }
    }

out:
    ;
}
Example #19
0
static void
rtpp_cmd_acceptor_run(void *arg)
{
    struct rtpp_cmd_async_cf *cmd_cf;
    struct pollfd *tp;
    struct rtpp_cmd_pollset *psp;
    struct rtpp_cmd_accptset *asp;
    struct rtpp_cmd_connection *rcc;
    int nready, controlfd, i, tstate;

    cmd_cf = (struct rtpp_cmd_async_cf *)arg;
    psp = &cmd_cf->pset;
    asp = &cmd_cf->aset;

    for (;;) {
#ifndef LINUX_XXX
        nready = poll(asp->pfds, asp->pfds_used, INFTIM);
#else
	nready = poll(asp->pfds, asp->pfds_used, 100);
#endif
        pthread_mutex_lock(&cmd_cf->cmd_mutex);
        tstate = cmd_cf->tstate_acceptor;
        pthread_mutex_unlock(&cmd_cf->cmd_mutex);
        if (tstate == TSTATE_CEASE) {
            break;
        }
        if (nready <= 0)
            continue;
        for (i = 0; i < asp->pfds_used; i++) {
            if ((asp->pfds[i].revents & POLLIN) == 0) {
                continue;
            }
            pthread_mutex_lock(&psp->pfds_mutex);
            if (psp->pfds_used >= RTPC_MAX_CONNECTIONS) {
                pthread_mutex_unlock(&psp->pfds_mutex);
                continue;
            }
            controlfd = accept_connection(cmd_cf->cf_save, asp->pfds[i].fd);
            if (controlfd < 0) {
                pthread_mutex_unlock(&psp->pfds_mutex);
                continue;
            }
            tp = realloc(psp->pfds, sizeof(struct pollfd) * (psp->pfds_used + 1));
            if (tp == NULL) {
                pthread_mutex_unlock(&psp->pfds_mutex);
                close(controlfd); /* Yeah, sorry, please try later */
                continue;
            }
            rcc = rtpp_cmd_connection_ctor(controlfd, controlfd, asp->csocks[i]);
            if (rcc == NULL) {
                pthread_mutex_unlock(&psp->pfds_mutex);
                close(controlfd); /* Yeah, sorry, please try later */
                continue;
            }
            psp->pfds = tp;
            psp->pfds[psp->pfds_used].fd = controlfd;
            psp->pfds[psp->pfds_used].events = POLLIN | POLLERR | POLLHUP;
            psp->pfds[psp->pfds_used].revents = 0;
            psp->rccs[psp->pfds_used] = rcc;
            psp->pfds_used++;
            pthread_mutex_unlock(&psp->pfds_mutex);
            rtpp_command_async_wakeup(&cmd_cf->pub);
        }
    }
}
Example #20
0
static void ping(char *svr)
{
	struct sigaction sa;
	struct sockaddr_l2 addr;
	socklen_t optlen;
	unsigned char *send_buf;
	unsigned char *recv_buf;
	char str[18];
	int i, sk, lost;
	uint8_t id;

	memset(&sa, 0, sizeof(sa));
	sa.sa_handler = stat;
	sigaction(SIGINT, &sa, NULL);

	send_buf = malloc(L2CAP_CMD_HDR_SIZE + size);
	recv_buf = malloc(L2CAP_CMD_HDR_SIZE + size);
	if (!send_buf || !recv_buf) {
		perror("Can't allocate buffer");
		exit(1);
	}

	/* Create socket */
	sk = socket(PF_BLUETOOTH, SOCK_RAW, BTPROTO_L2CAP);
	if (sk < 0) {
		perror("Can't create socket");
		goto error;
	}

	/* Bind to local address */
	memset(&addr, 0, sizeof(addr));
	addr.l2_family = AF_BLUETOOTH;
	bacpy(&addr.l2_bdaddr, &bdaddr);

	if (bind(sk, (struct sockaddr *) &addr, sizeof(addr)) < 0) {
		perror("Can't bind socket");
		goto error;
	}

	/* Connect to remote device */
	memset(&addr, 0, sizeof(addr));
	addr.l2_family = AF_BLUETOOTH;
	str2ba(svr, &addr.l2_bdaddr);

	if (connect(sk, (struct sockaddr *) &addr, sizeof(addr)) < 0) {
		perror("Can't connect");
		goto error;
	}

	/* Get local address */
	memset(&addr, 0, sizeof(addr));
	optlen = sizeof(addr);

	if (getsockname(sk, (struct sockaddr *) &addr, &optlen) < 0) {
		perror("Can't get local address");
		goto error;
	}

	ba2str(&addr.l2_bdaddr, str);
	printf("Ping: %s from %s (data size %d) ...\n", svr, str, size);

	/* Initialize send buffer */
	for (i = 0; i < size; i++)
		send_buf[L2CAP_CMD_HDR_SIZE + i] = (i % 40) + 'A';

	id = ident;

	while (count == -1 || count-- > 0) {
		struct timeval tv_send, tv_recv, tv_diff;
		l2cap_cmd_hdr *send_cmd = (l2cap_cmd_hdr *) send_buf;
		l2cap_cmd_hdr *recv_cmd = (l2cap_cmd_hdr *) recv_buf;

		/* Build command header */
		send_cmd->ident = id;
		send_cmd->len   = htobs(size);

		if (reverse)
			send_cmd->code = L2CAP_ECHO_RSP;
		else
			send_cmd->code = L2CAP_ECHO_REQ;

		gettimeofday(&tv_send, NULL);

		/* Send Echo Command */
		if (send(sk, send_buf, L2CAP_CMD_HDR_SIZE + size, 0) <= 0) {
			perror("Send failed");
			goto error;
		}

		/* Wait for Echo Response */
		lost = 0;
		while (1) {
			struct pollfd pf[1];
			int err;

			pf[0].fd = sk;
			pf[0].events = POLLIN;

			if ((err = poll(pf, 1, timeout * 1000)) < 0) {
				perror("Poll failed");
				goto error;
			}

			if (!err) {
				lost = 1;
				break;
			}

			if ((err = recv(sk, recv_buf, L2CAP_CMD_HDR_SIZE + size, 0)) < 0) {
				perror("Recv failed");
				goto error;
			}

			if (!err){
				printf("Disconnected\n");
				goto error;
			}

			recv_cmd->len = btohs(recv_cmd->len);

			/* Check for our id */
			if (recv_cmd->ident != id)
				continue;

			/* Check type */
			if (!reverse && recv_cmd->code == L2CAP_ECHO_RSP)
				break;

			if (recv_cmd->code == L2CAP_COMMAND_REJ) {
				printf("Peer doesn't support Echo packets\n");
				goto error;
			}

		}
		sent_pkt++;

		if (!lost) {
			recv_pkt++;

			gettimeofday(&tv_recv, NULL);
			timersub(&tv_recv, &tv_send, &tv_diff);

			if (verify) {
				/* Check payload length */
				if (recv_cmd->len != size) {
					fprintf(stderr, "Received %d bytes, expected %d\n",
						   recv_cmd->len, size);
					goto error;
				}

				/* Check payload */
				if (memcmp(&send_buf[L2CAP_CMD_HDR_SIZE],
						   &recv_buf[L2CAP_CMD_HDR_SIZE], size)) {
					fprintf(stderr, "Response payload different.\n");
					goto error;
				}
			}

			printf("%d bytes from %s id %d time %.2fms\n", recv_cmd->len, svr,
				   id - ident, tv2fl(tv_diff));

			if (delay)
				sleep(delay);
		} else {
			printf("no response from %s: id %d\n", svr, id - ident);
		}

		if (++id > 254)
			id = ident;
	}
	stat(0);
	free(send_buf);
	free(recv_buf);
	return;

error:
	close(sk);
	free(send_buf);
	free(recv_buf);
	exit(1);
}
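
The echo loop above waits for each reply with a single poll() call whose timeout is given in milliseconds; a zero return means the deadline passed and the packet is counted as lost. A minimal sketch of that wait in isolation (the helper name and return convention are illustrative, not taken from the example):

#include <poll.h>

/* Illustrative helper: block up to `timeout` seconds for data on `sk`.
 * Returns 1 if a reply is readable, 0 on timeout (packet lost), -1 on error. */
static int reply_arrived(int sk, int timeout)
{
	struct pollfd pf = { .fd = sk, .events = POLLIN, .revents = 0 };
	int err = poll(&pf, 1, timeout * 1000);	/* poll() takes milliseconds */

	if (err < 0)
		return -1;
	return err > 0;	/* 0 means the deadline expired */
}
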
Example #21
0
static void
rtpp_cmd_queue_run(void *arg)
{
    struct rtpp_cmd_async_cf *cmd_cf;
    struct rtpp_cmd_pollset *psp;
    int i, nready, rval, umode;
    double sptime;
#if 0
    double eptime, tused;
#endif
    struct rtpp_command_stats *csp;
    struct rtpp_stats_obj *rtpp_stats_cf;

    cmd_cf = (struct rtpp_cmd_async_cf *)arg;
    rtpp_stats_cf = cmd_cf->cf_save->stable->rtpp_stats;
    csp = &cmd_cf->cstats;

    psp = &cmd_cf->pset;

    for (;;) {
        sptime = getdtime();

        pthread_mutex_lock(&psp->pfds_mutex);
        if (psp->pfds_used == 0) {
            pthread_mutex_unlock(&psp->pfds_mutex);
            if (wait_next_clock(cmd_cf) == TSTATE_CEASE) {
                break;
            }
            continue;
        }
        nready = poll(psp->pfds, psp->pfds_used, 2);
        if (nready == 0) {
            pthread_mutex_unlock(&psp->pfds_mutex);
            if (wait_next_clock(cmd_cf) == TSTATE_CEASE) {
                break;
            }
            continue;
        }
        if (nready < 0 && errno == EINTR) {
            pthread_mutex_unlock(&psp->pfds_mutex);
            continue;
        }
        if (nready > 0) {
            for (i = 0; i < psp->pfds_used; i++) {
                if ((psp->pfds[i].revents & (POLLERR | POLLHUP)) != 0) {
                    if (RTPP_CTRL_ISUNIX(psp->rccs[i]->csock)) {
                        goto closefd;
                    }
                    if (psp->rccs[i]->csock->type == RTPC_STDIO && (psp->pfds[i].revents & POLLIN) == 0) {
                        goto closefd;
                    }
                }
                if ((psp->pfds[i].revents & POLLIN) == 0) {
                    continue;
                }
                if (RTPP_CTRL_ISSTREAM(psp->rccs[i]->csock)) {
                    rval = process_commands_stream(cmd_cf->cf_save, psp->rccs[i], sptime, csp, rtpp_stats_cf);
                } else {
                    umode = RTPP_CTRL_ISDG(psp->rccs[i]->csock);
                    rval = process_commands(cmd_cf->cf_save, psp->pfds[i].fd,
                      sptime, csp, umode, rtpp_stats_cf, cmd_cf->rcache);
                }
                /*
                 * Shut down non-datagram sockets that got an I/O error;
                 * one-shot (non-stream) control sockets are also recycled
                 * after each use.
                 */
                if (!RTPP_CTRL_ISDG(psp->rccs[i]->csock) && (rval == -1 || !RTPP_CTRL_ISSTREAM(psp->rccs[i]->csock))) {
closefd:
                    if (psp->rccs[i]->csock->type == RTPC_STDIO && psp->rccs[i]->csock->exit_on_close != 0) {
                        cmd_cf->cf_save->stable->slowshutdown = 1;
                    }
                    rtpp_cmd_connection_dtor(psp->rccs[i]);
                    psp->pfds_used--;
                    if (psp->pfds_used > 0 && i < psp->pfds_used) {
                        memmove(&psp->pfds[i], &psp->pfds[i + 1],
                          (psp->pfds_used - i) * sizeof(struct pollfd));
                        memmove(&psp->rccs[i], &psp->rccs[i + 1],
                          (psp->pfds_used - i) * sizeof(struct rtpp_ctrl_connection *));
                    }
                }
            }
        }
        pthread_mutex_unlock(&psp->pfds_mutex);
        if (nready > 0) {
            rtpp_anetio_pump(cmd_cf->cf_save->stable->rtpp_netio_cf);
        }
#if 0
        eptime = getdtime();
        pthread_mutex_lock(&cmd_cf->cmd_mutex);
        recfilter_apply(&cmd_cf->average_load, (eptime - sptime + tused) * cmd_cf->cf_save->stable->target_pfreq);
        pthread_mutex_unlock(&cmd_cf->cmd_mutex);
#endif
        flush_cstats(rtpp_stats_cf, csp);
#if 0
#if RTPP_DEBUG
        if (last_ctick % (unsigned int)cmd_cf->cf_save->stable->target_pfreq == 0 || last_ctick < 1000) {
            rtpp_log_write(RTPP_LOG_DBUG, cmd_cf->cf_save->stable->glog, "rtpp_cmd_queue_run %lld sptime %f eptime %f, CSV: %f,%f,%f,%f,%f", \
              last_ctick, sptime, eptime, (double)last_ctick / cmd_cf->cf_save->stable->target_pfreq, \
              eptime - sptime + tused, eptime, sptime, tused);
            rtpp_log_write(RTPP_LOG_DBUG, cmd_cf->cf_save->stable->glog, "run %lld average load %f, CSV: %f,%f", last_ctick, \
              cmd_cf->average_load.lastval * 100.0, (double)last_ctick / cmd_cf->cf_save->stable->target_pfreq, cmd_cf->average_load.lastval);
        }
#endif
#endif
    }
}
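
The closefd path above compacts two parallel arrays, the pollfd set and the per-connection pointers, by sliding the tail of both down one slot (memmove() because the source and destination ranges overlap). A minimal sketch of that compaction step on its own, with illustrative names:

#include <poll.h>
#include <string.h>

/* Illustrative helper: remove entry `i` from a pollfd array and a parallel
 * array of per-connection cookies, keeping both arrays densely packed. */
static void drop_poll_entry(struct pollfd *pfds, void **cookies, int *used, int i)
{
	(*used)--;
	if (i < *used) {
		memmove(&pfds[i], &pfds[i + 1], (*used - i) * sizeof(pfds[0]));
		memmove(&cookies[i], &cookies[i + 1], (*used - i) * sizeof(cookies[0]));
	}
}
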
Example #22
0
int tls_write_all(rdpTls* tls, const BYTE* data, int length)
{
	int status, nchunks, commitedBytes;
	rdpTcp *tcp;
#ifdef HAVE_POLL_H
	struct pollfd pollfds;
#else
	fd_set rset, wset;
	fd_set *rsetPtr, *wsetPtr;
	struct timeval tv;
#endif
	BIO* bio = tls->bio;
	DataChunk chunks[2];

	BIO* bufferedBio = findBufferedBio(bio);

	if (!bufferedBio)
	{
		WLog_ERR(TAG,  "error unable to retrieve the bufferedBio in the BIO chain");
		return -1;
	}

	tcp = (rdpTcp*) bufferedBio->ptr;

	do
	{
		status = BIO_write(bio, data, length);

		if (status > 0)
			break;

		if (!BIO_should_retry(bio))
			return -1;
#ifdef HAVE_POLL_H
		pollfds.fd = tcp->sockfd;
		pollfds.revents = 0;
		pollfds.events = 0;

		if (tcp->writeBlocked)
		{
			pollfds.events |= POLLOUT;
		}
		else if (tcp->readBlocked)
		{
			pollfds.events |= POLLIN;
		}
		else
		{
			WLog_ERR(TAG,  "weird we're blocked but the underlying is not read or write blocked !");
			USleep(10);
			continue;
		}

		do
		{
			status = poll(&pollfds, 1, 100);
		}
		while ((status < 0) && (errno == EINTR));
#else
		/* we try to handle SSL want_read and want_write nicely */
		rsetPtr = wsetPtr = NULL;

		if (tcp->writeBlocked)
		{
			wsetPtr = &wset;
			FD_ZERO(&wset);
			FD_SET(tcp->sockfd, &wset);
		}
		else if (tcp->readBlocked)
		{
			rsetPtr = &rset;
			FD_ZERO(&rset);
			FD_SET(tcp->sockfd, &rset);
		}
		else
		{
			WLog_ERR(TAG,  "weird we're blocked but the underlying is not read or write blocked !");
			USleep(10);
			continue;
		}

		tv.tv_sec = 0;
		tv.tv_usec = 100 * 1000;

		status = _select(tcp->sockfd + 1, rsetPtr, wsetPtr, NULL, &tv);
#endif
		if (status < 0)
			return -1;
	}
	while (TRUE);

	/* make sure the output buffer is empty */
	commitedBytes = 0;
	while ((nchunks = ringbuffer_peek(&tcp->xmitBuffer, chunks, ringbuffer_used(&tcp->xmitBuffer))))
	{
		int i;

		for (i = 0; i < nchunks; i++)
		{
			while (chunks[i].size)
			{
				status = BIO_write(tcp->socketBio, chunks[i].data, chunks[i].size);

				if (status > 0)
				{
					chunks[i].size -= status;
					chunks[i].data += status;
					commitedBytes += status;
					continue;
				}

				if (!BIO_should_retry(tcp->socketBio))
					goto out_fail;

#ifdef HAVE_POLL_H
				pollfds.fd = tcp->sockfd;
				pollfds.events = POLLIN;
				pollfds.revents = 0;

				do
				{
					status = poll(&pollfds, 1, 100);
				}
				while ((status < 0) && (errno == EINTR));
#else
				FD_ZERO(&rset);
				FD_SET(tcp->sockfd, &rset);
				tv.tv_sec = 0;
				tv.tv_usec = 100 * 1000;

				status = _select(tcp->sockfd + 1, &rset, NULL, NULL, &tv);
#endif
				if (status < 0)
					goto out_fail;
			}

		}
	}

	ringbuffer_commit_read_bytes(&tcp->xmitBuffer, commitedBytes);
	return length;

out_fail:
	ringbuffer_commit_read_bytes(&tcp->xmitBuffer, commitedBytes);
	return -1;
}
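
Both blocking paths above repeat the same pattern: wait up to 100 ms for the socket with poll() where HAVE_POLL_H is defined, restart on EINTR, and fall back to select() otherwise. A minimal sketch of that portable wait, assuming only the HAVE_POLL_H guard from the example (the helper name and the plain select() call are illustrative, not the library's own wrappers):

#include <errno.h>
#ifdef HAVE_POLL_H
#include <poll.h>
#else
#include <sys/select.h>
#endif

/* Illustrative helper: wait up to 100 ms for `fd` to become readable
 * (want_read != 0) or writable (want_read == 0). Returns the poll()/select()
 * result: >0 ready, 0 timeout, <0 error. */
static int wait_fd_100ms(int fd, int want_read)
{
	int status;
#ifdef HAVE_POLL_H
	struct pollfd pfd = { .fd = fd, .events = want_read ? POLLIN : POLLOUT, .revents = 0 };

	do {
		status = poll(&pfd, 1, 100);
	} while (status < 0 && errno == EINTR);	/* restart if a signal interrupts the wait */
#else
	fd_set set;
	struct timeval tv = { 0, 100 * 1000 };	/* 100 ms */

	FD_ZERO(&set);
	FD_SET(fd, &set);
	status = select(fd + 1, want_read ? &set : NULL, want_read ? NULL : &set, NULL, &tv);
#endif
	return status;
}
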
Example #23
0
void run_work(struct work_param* param) {
	if (pipe(param->pipes) != 0) {
		printf("Failed to create pipe! %s\n", strerror(errno));
		return;
	}
	unsigned char wb;
	unsigned char* mbuf = xmalloc(1024);
	while (1) {
		pthread_rwlock_rdlock(&param->conns->data_mutex);
		size_t cc = param->conns->count;
		struct pollfd fds[cc + 1];
		struct conn* conns[cc];
		int fdi = 0;
		for (int i = 0; i < param->conns->size; i++) {
			if (param->conns->data[i] != NULL) {
				conns[fdi] = (param->conns->data[i]);
				struct conn* conn = conns[fdi];
				fds[fdi].fd = conns[fdi]->fd;
				fds[fdi].events = POLLIN | (conn->writeBuffer_size > 0 ? POLLOUT : 0);
				fds[fdi++].revents = 0;
				if (fdi == cc) break;
			} else conns[fdi] = NULL;
		}
		pthread_rwlock_unlock(&param->conns->data_mutex);
		fds[cc].fd = param->pipes[0];
		fds[cc].events = POLLIN;
		fds[cc].revents = 0;
		int cp = poll(fds, cc + 1, -1);
		if (param->mysql->mysql && param->mysql->complete && param->zone != param->mysql->czone) {
			if (param->zone != NULL) {
				freeZone(param->zone);
			}
			param->zone = param->mysql->czone;
		}
		if (cp < 0) {
			printf("Poll error in worker thread! %s\n", strerror(errno));
		} else if (cp == 0) continue;
		else if ((fds[cc].revents & POLLIN) == POLLIN) {
			if (read(param->pipes[0], &wb, 1) < 1) printf("Error reading from pipe, infinite loop COULD happen here.\n");
			if (cp-- == 1) continue;
		}
		for (int i = 0; i < cc; i++) {
			int re = fds[i].revents;
			struct conn* conn = conns[i];
			if (conn == NULL) continue;
			if ((re & POLLERR) == POLLERR) {
				//printf("POLLERR in worker poll! This is bad!\n");
				goto cont;
			}
			if ((re & POLLHUP) == POLLHUP) {
				closeConn(param, conn);
				conn = NULL;
				goto cont;
			}
			if ((re & POLLNVAL) == POLLNVAL) {
				printf("Invalid FD in worker poll! This is bad!\n");
				closeConn(param, conn);
				conn = NULL;
				goto cont;
			}
			if ((re & POLLIN) == POLLIN) {
				int tr = 0; // FIONREAD reports the readable byte count into an int
				ioctl(fds[i].fd, FIONREAD, &tr);
				unsigned char* loc;
				if (conn->readBuffer == NULL) {
					conn->readBuffer = xmalloc(tr); // TODO: max upload?
					conn->readBuffer_size = tr;
					loc = conn->readBuffer;
				} else {
					conn->readBuffer_size += tr;
					conn->readBuffer = xrealloc(conn->readBuffer, conn->readBuffer_size);
					loc = conn->readBuffer + conn->readBuffer_size - tr;
				}
				ssize_t r = 0;
				if (r == 0 && tr == 0) { // nothing to read, but won't block.
					ssize_t x = 0;
					x = read(fds[i].fd, loc + r, tr - r);
					if (x <= 0) {
						closeConn(param, conn);
						conn = NULL;
						goto cont;
					}
					r += x;
				}
				while (r < tr) {
					ssize_t x = 0;
					x = read(fds[i].fd, loc + r, tr - r);
					if (x <= 0) {
						closeConn(param, conn);
						conn = NULL;
						goto cont;
					}
					r += x;
				}
				int p = 0;
				p = handleRead(conn, param, fds[i].fd);
				if (p == 1) {
					goto cont;
				}
			}
			if ((re & POLLOUT) == POLLOUT && conn != NULL) {
				ssize_t mtr = write(fds[i].fd, conn->writeBuffer, conn->writeBuffer_size);
				if (mtr < 0 && errno != EAGAIN) {
					closeConn(param, conn);
					conn = NULL;
					goto cont;
				} else if (mtr < 0) {
					goto cont;
				} else if (mtr < conn->writeBuffer_size) {
					memmove(conn->writeBuffer, conn->writeBuffer + mtr, conn->writeBuffer_size - mtr);
					conn->writeBuffer_size -= mtr;
					conn->writeBuffer = xrealloc(conn->writeBuffer, conn->writeBuffer_size);
				} else {
					conn->writeBuffer_size = 0;
					xfree(conn->writeBuffer);
					conn->writeBuffer = NULL;
				}
				if (conn->writeBuffer_size == 0 && conn->state == 1) {
					closeConn(param, conn);
					conn = NULL;
					goto cont;
				}
			}
			cont: ;
			if (--cp == 0) break;
		}
	}
	xfree(mbuf);
}
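
The extra slot fds[cc] above is the read end of a pipe, so other threads can interrupt the otherwise unbounded poll(fds, cc + 1, -1) by writing a single byte, which the worker then drains. A minimal sketch of that wakeup channel, assuming a pipes[2] array created with pipe() as in the example (the helper names are illustrative):

#include <unistd.h>

/* Illustrative helpers for the self-pipe wakeup: the writer nudges the worker,
 * the worker drains one byte per nudge so the pipe never fills up. */
static void wake_worker(int pipes[2])
{
	unsigned char wb = 1;

	(void) write(pipes[1], &wb, 1);	/* makes poll() on pipes[0] report POLLIN */
}

static void drain_wakeup(int pipes[2])
{
	unsigned char wb;

	(void) read(pipes[0], &wb, 1);	/* one read per wakeup byte */
}
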
Example #24
0
/*
 * Filter out unresponsive servers, and save the domain info
 * returned by the "LDAP ping" in the returned object.
 * If ctx != NULL, this is a query for a DC, in which case we
 * also save the Domain GUID, Site name, and Forest name as
 * "auto" (discovered) values in the ctx.
 *
 * Only return the "winner".  (We only want one DC/GC)
 */
ad_disc_ds_t *
ldap_ping(ad_disc_t ctx, ad_disc_cds_t *dclist, char *dname, int reqflags)
{
	struct sockaddr_in6 addr6;
	socklen_t addrlen;
	struct pollfd pingchk;
	ad_disc_cds_t *send_ds;
	ad_disc_cds_t *recv_ds = NULL;
	ad_disc_ds_t *ret_ds = NULL;
	BerElement *req = NULL;
	BerElement *res = NULL;
	struct _berelement *be, *rbe;
	size_t be_len, rbe_len;
	int fd = -1;
	int tries = 3;
	int waitsec;
	int r;
	uint16_t msgid;

	/* One plus a null entry. */
	ret_ds = calloc(2, sizeof (ad_disc_ds_t));
	if (ret_ds == NULL)
		goto fail;

	if ((fd = socket(PF_INET6, SOCK_DGRAM, 0)) < 0)
		goto fail;

	(void) memset(&addr6, 0, sizeof (addr6));
	addr6.sin6_family = AF_INET6;
	addr6.sin6_addr = in6addr_any;
	if (bind(fd, (struct sockaddr *)&addr6, sizeof (addr6)) < 0)
		goto fail;

	/*
	 * semi-unique msgid...
	 */
	msgid = gethrtime() & 0xffff;

	/*
	 * Is ntver right? It certainly works on w2k8... If others are needed,
	 * that might require changes to cldap_parse
	 */
	req = cldap_build_request(dname, NULL,
	    NETLOGON_NT_VERSION_5EX, msgid);
	if (req == NULL)
		goto fail;
	be = (struct _berelement *)req;
	be_len = be->ber_end - be->ber_buf;

	if ((res = ber_alloc()) == NULL)
		goto fail;
	rbe = (struct _berelement *)res;
	rbe_len = rbe->ber_end - rbe->ber_buf;

	pingchk.fd = fd;
	pingchk.events = POLLIN;
	pingchk.revents = 0;

try_again:
	send_ds = dclist;
	waitsec = 5;
	while (recv_ds == NULL && waitsec > 0) {

		/*
		 * If there is another candidate, send to it.
		 */
		if (send_ds->cds_ds.host[0] != '\0') {
			send_to_cds(send_ds, be->ber_buf, be_len, fd);
			send_ds++;

			/*
			 * Wait 1/10 sec. before the next send.
			 */
			r = poll(&pingchk, 1, 100);
#if 0 /* DEBUG */
			/* Drop all responses 1st pass. */
			if (waitsec == 5)
				r = 0;
#endif
		} else {
			/*
			 * No more candidates to "ping", so
			 * just wait a sec for responses.
			 */
			r = poll(&pingchk, 1, 1000);
			if (r == 0)
				--waitsec;
		}

		if (r > 0) {
			/*
			 * Got a response.
			 */
			(void) memset(&addr6, 0, addrlen = sizeof (addr6));
			r = recvfrom(fd, rbe->ber_buf, rbe_len, 0,
			    (struct sockaddr *)&addr6, &addrlen);

			recv_ds = find_cds_by_addr(dclist, &addr6);
			if (recv_ds == NULL)
				continue;

			(void) cldap_parse(ctx, recv_ds, res);
			if ((recv_ds->cds_ds.flags & reqflags) != reqflags) {
				logger(LOG_ERR, "Skip %s "
				    "due to flags 0x%X",
				    recv_ds->cds_ds.host,
				    recv_ds->cds_ds.flags);
				recv_ds = NULL;
			}
		}
	}

	if (recv_ds == NULL) {
		if (--tries <= 0)
			goto fail;
		goto try_again;
	}

	(void) memcpy(ret_ds, recv_ds, sizeof (*ret_ds));

	ber_free(res, 1);
	ber_free(req, 1);
	(void) close(fd);
	return (ret_ds);

fail:
	ber_free(res, 1);
	ber_free(req, 1);
	(void) close(fd);
	free(ret_ds);
	return (NULL);
}
Example #25
0
/*ARGSUSED*/
static void *
env_polling_thread(void *args)
{
	int			poll_rc;
	struct pollfd		poll_fds[1];
	void			*datap;
	int			smcfd;
	struct strioctl		strio;
	sc_cmdspec_t		set;

	smcfd = env_open_smc();
	if (smcfd == -1) {
		syslog(LOG_ERR, gettext("SUNW_envmond:Error in polling, "
			"Open of SMC drv failed"));
		create_polling_thr = B_TRUE;
		return (NULL);
	}

	set.args[0]	= SMC_SENSOR_EVENT_ENABLE_SET;
	set.attribute	= SC_ATTR_SHARED;
	strio.ic_cmd	= SCIOC_MSG_SPEC;
	strio.ic_timout	= 0;
	strio.ic_len	= ENV_SENSOR_EV_ENABLE_PKT_LEN;
	strio.ic_dp	= (char *)&set;
	if (ioctl(smcfd, I_STR, &strio) < 0) {
		syslog(LOG_ERR, gettext("SUNW_envmond:Request for "
			"Sensor events failed"));
		(void) close(smcfd);
		create_polling_thr = B_TRUE;
		return (NULL);
	}

	/* request for async messages */
	poll_fds[0].fd		= smcfd;
	poll_fds[0].events	= POLLIN|POLLPRI;
	poll_fds[0].revents	= 0;

	set.attribute	= SC_ATTR_SHARED;
	set.args[0]	= SMC_IPMI_RESPONSE_NOTIF;
	set.args[1]	= SMC_SMC_LOCAL_EVENT_NOTIF;
	strio.ic_cmd	= SCIOC_MSG_SPEC;
	strio.ic_timout	= 0;
	strio.ic_len	= ENV_IPMI_SMC_ENABLE_PKT_LEN;
	strio.ic_dp	= (char *)&set;
	if (ioctl(smcfd, I_STR, &strio) == -1) {
		syslog(LOG_ERR, gettext("SUNW_envmond:Request for "
			"Async messages failed"));
		(void) close(smcfd);
		create_polling_thr = B_TRUE;
		return (NULL);
	}

	/* Now wait for SMC events to come */
	for (;;) {
		poll_rc = poll(poll_fds, 1, -1); /* poll forever */
		if (poll_rc < 0) {
			syslog(LOG_ERR, gettext("SUNW_envmond:Event "
				"processing halted"));
			break;
		}
		if (env_process_smc_event(smcfd, &datap) == NO_EVENT) {
			syslog(LOG_ERR, gettext("SUNW_envmond:"
				"wrong event data posted from SMC"));
		}
	}
	(void) close(smcfd);
	create_polling_thr = B_TRUE;
	return (NULL);
}
Example #26
0
bool stop_serv()
{
    if(bServerOnline)  //Make sure it actually is online
    {
        gtk_widget_set_sensitive(start_and_stop_button, false);
        bServerOnline = false; //Make sure the server is actually off
        add_console_msg("[Main]",notification,"Waiting for all threads to complete");
        while(!check_if_all_threads_completed());
         /*Wait for safety!*/
        add_console_msg("[Main]",notification,"All threads have exited safely");

        for(unsigned int i=0; i<rgClients.size(); i++)
        {
            if(i==0) close(rgClients[0].uiSocketUDP);
            close(rgClients[i].uiSocketTCP);
        }
         /*Wait for safety!*/
        add_console_msg("[Main]",notification,"All sockets (including server's) have been closed");
        /*Back up all players here*/
        add_console_msg("[Main]",notification,"Pushing all accounts to save queue");
        for(unsigned int i=0; i<rgAccounts.size(); i++)
        {
            if(rgAccounts[i].status == active)
            {
                qSaveAccounts.push(rgAccounts[i]);
            }
        }

        add_console_msg("[Main]",notification,"Waiting for saving queue to finish");
        while(!qSaveAccounts.empty()); //Block until qSaveAccounts has been all worked upon
         /*Wait for safety!*/

        /*Cleaning up for next time init*/
        add_console_msg("[Main]",notification,"Cleaning up existing list of sockets, clients, and accounts");
        rgAccounts.clear();
         /*Wait for safety!*/
        rgClients.clear();
         /*Wait for safety!*/
        rgSockets.clear(); //Clear these parallel mother fuckers!

        /*Close databases*/
        add_console_msg("[Database]", notification,"Closing database/accounts.db");
        sqlite3_close(accounts_db);
        accounts_db = NULL;
        add_console_msg("[Database]", notification,"Accounts database has successfully closed");
         /*Wait for safety!*/
        add_console_msg("[Database]", notification,"Closing database/area.db");
        sqlite3_close(area_db);
        area_db = NULL;
        add_console_msg("[Database]", notification,"Area database has successfully closed");

        gtk_button_set_label((GtkButton*)start_and_stop_button, "Start");
        g_signal_handler_disconnect(start_and_stop_button, start_stop_button_ls_id);
        start_stop_button_ls_id = g_signal_connect_after(start_and_stop_button, "released", G_CALLBACK(init_serv), NULL);

        add_console_msg("[Main]",notification,"Server has stopped successfully");
        change_status_msg("<span foreground='red'><b>Offline</b></span> The server has stopped successfully");

        add_console_msg("[Server]", important_offline, "[Server is now offline]");
        poll(0,0,25);
        gtk_widget_set_sensitive(start_and_stop_button, true);
        bConsoleActive = false; //Turn off console
        return true;
    }
    return false;
}
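
The poll(0,0,25) call above, like the CPU_DELAY waits elsewhere in these examples, uses poll() with no file descriptors purely as a portable millisecond sleep. A minimal sketch of that idiom as a wrapper (the name is illustrative):

#include <poll.h>

/* Illustrative wrapper: with no descriptors to watch, poll() simply blocks
 * until the timeout expires, giving a millisecond-resolution sleep. */
static void msleep(int milliseconds)
{
	(void) poll(NULL, 0, milliseconds);
}
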
Example #27
0
static void *
manager_input_loop( DirectThread *thread, void *arg )
{
     int            len;
     struct pollfd  pf;
     VoodooManager *manager = arg;

     manager->millis = direct_clock_get_millis();

     while (!manager->quit) {
          D_MAGIC_ASSERT( manager, VoodooManager );

          pf.fd     = manager->fd;
          pf.events = POLLIN;

          switch (poll( &pf, 1, 100 )) {
               case -1:
                    if (errno != EINTR) {
                         D_PERROR( "Voodoo/Input: Could not poll() the socket!\n" );
                         usleep( 200000 );
                    }
                    /* fall through */

               case 0:
                    continue;
          }

          pthread_mutex_lock( &manager->input.lock );

          while (manager->input.end == manager->input.max) {
               pthread_cond_wait( &manager->input.wait, &manager->input.lock );

               if (manager->quit)
                    break;
          }

          if (!manager->quit) {
               len = recv( manager->fd, manager->input.buffer + manager->input.end,
                           manager->input.max - manager->input.end, MSG_DONTWAIT );
               if (len < 0) {
                    switch (errno) {
                         case EINTR:
                         case EAGAIN:
                              break;
                         default:
                              D_PERROR( "Voodoo/Input: Could not recv() data!\n" );
                              usleep( 200000 );
                    }
               }
               else if (len > 0) {
                    D_DEBUG( "Voodoo/Input: Received %d bytes...\n", len );

                    manager->input.end += len;

                    pthread_cond_broadcast( &manager->input.wait );
               }
               else
                    handle_disconnect( manager );
          }

          pthread_mutex_unlock( &manager->input.lock );
     }

     return NULL;
}
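
After poll() reports the socket, the loop above does a non-blocking recv(..., MSG_DONTWAIT) and classifies the result: positive means data arrived, zero means the peer disconnected, and EINTR/EAGAIN just mean there was nothing to read after all. A minimal sketch of that classification (the helper name and return codes are illustrative):

#include <errno.h>
#include <sys/socket.h>
#include <sys/types.h>

/* Illustrative helper: non-blocking read after poll() said POLLIN.
 * Returns the byte count, 0 for an orderly disconnect, -1 for a spurious
 * wakeup (try again later), -2 for a hard error. */
static ssize_t drain_socket(int fd, void *buf, size_t space)
{
	ssize_t len = recv(fd, buf, space, MSG_DONTWAIT);

	if (len > 0)
		return len;
	if (len == 0)
		return 0;			/* peer closed the connection */
	if (errno == EINTR || errno == EAGAIN || errno == EWOULDBLOCK)
		return -1;			/* nothing to read after all */
	return -2;				/* real error, e.g. ECONNRESET */
}
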
Example #28
0
void *listen_thread_func(void* args)  //TCP's thread
{
    bThreadActive[0] = true;
    thread_args* my_args = (thread_args*) args;
    add_console_msg("[Listen]",notification, "Thread has started");


    add_console_msg("[Listen]",notification, "Doing safe cleanup of Sockets and Clients");
    if(rgSockets.size() > 0)
    {
        rgSockets.clear();
    }
    if(rgClients.size() > 0 )
    {
        rgClients.clear();
    }
    if(rgAccounts.size() > 0)
    {
        rgAccounts.clear();
    }


    pollfd MasterSocket;
    MasterSocket.fd = uiMasterSocketTCP;
    MasterSocket.events = POLLIN;
    rgSockets.push_back(MasterSocket);

    soft_client MasterClient(server_fill, uiMasterSocketTCP, uiMasterSocketUDP);
    rgClients.push_back(MasterClient);

    soft_account MasterAccount;
    MasterClient.set_account(MasterAccount);
    rgAccounts.push_back(MasterAccount);



    int PollActivity;
    pollfd* ptrPollSockets;
    add_console_msg("[Listen]",notification, "Poll is ready to read incoming connections");

    while(bServerOnline)
    {

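        //Re-take the buffer pointer each pass: the push_back()/erase() calls below can move the vector's storage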
        ptrPollSockets = &rgSockets[0];
        while(bServerOnline)
        {
            PollActivity = poll(ptrPollSockets, rgSockets.size(), POLL_DELAY);
            if(PollActivity !=0) break;
        }
        if(PollActivity < 0)
        {
            perror("tcp-poll");
            add_console_msg("[Listen]",warning, "Could not poll sockets");
        }
        if(rgSockets[0].revents & POLLIN)  //Server received a connection
        {
            int uiNewSocket; //must be signed so the accept() failure check below can ever trigger
            struct sockaddr_storage SNewClientAddr;
            socklen_t iNewClientAddr_Size = sizeof SNewClientAddr;
            if((uiNewSocket = accept(uiMasterSocketTCP, (struct sockaddr*)&SNewClientAddr, &iNewClientAddr_Size)) < 0)
            {
                perror("tcp-accept");
                add_console_msg("[Listen]", warning, "Failed to accept client");
            }
            else
            {

                add_console_msg("[Listen]", notification, "A client has successfully connected"); //Remove this later

                /*Create a pollfd for new socket*/
                pollfd NewSocket;
                NewSocket.fd = uiNewSocket;
                NewSocket.events = POLLIN;
                rgSockets.push_back(NewSocket);
                /*Create a new client for socket*/
                soft_client NewClient(uiNewSocket, SNewClientAddr); //takes TCP socket and udp address and port

                soft_account InactiveAccount; //Creates a blank account with inactivityand databaseID equals to 0
                NewClient.set_account(InactiveAccount); //This does not increment accounts logged in. You must LOG in to do so.
                rgClients.push_back(NewClient);
                rgAccounts.push_back(InactiveAccount);
                change_status_msg("<span foreground='green'><b>Online</b></span> <b>"+convert_to_str(rgSockets.size()-1)+"</b> user(s) connected");
                /*Add to Address Watchlist*/
                rgAddressWatch.push_back(SNewClientAddr);
            }
        }
        for(unsigned int i=1; i<rgSockets.size(); i++)
        {
            if(i != rgClients[i].uiPosition) rgClients[i].uiPosition = i; //Take uiPosition
            if(rgSockets[i].revents & POLLIN)
            {
                int read_val;
                char chBuffer[50];
                if((read_val = (recv(rgSockets[i].fd, chBuffer, 50, 0))) > 0) //0 (orderly shutdown) and -1 (error) both drop the client below
                {



                    /*Players get linked to an account here*/

                }
                else
                {

                    /*Client has disconnected*/
                    add_console_msg("[Listen]", notification, "A client has disconnected");
                    rgClients[i].status = offline; //He will be taken care of in the next step. To keep order
                }
            }
        }
        /*Sweep disconnected players*/
        for(unsigned int i=1; i<rgClients.size(); i++)
        {
            if(rgClients[i].status == offline)
            {
                if(rgClients[i].AccountInfo.status == active)
                {
                    /*Save the account so far*/
                    add_console_msg("[Listen]",notification, "Client's account was pushed to the save queue");
                    qSaveAccounts.push(rgAccounts[i]); //Put in save queue
                }

                close(rgClients[i].uiSocketTCP); //Closes their TCP socket

                rgSockets.erase(rgSockets.begin()+i);
                rgClients.erase(rgClients.begin()+i); // then remove
                rgAccounts.erase(rgAccounts.begin()+i); //remove accounts whether inactive or not
                i--; //step back so the client that shifted into this slot is not skipped

                add_console_msg("[Listen]",notification, "Client and socket have been removed from polling");
                change_status_msg("<span foreground='green'><b>Online</b></span> <b>"+convert_to_str(rgSockets.size()-1)+"</b> user(s) connected");
            }

        }
        poll(0,0,CPU_DELAY); //Reduce CPU usage

    }
    ptrPollSockets = NULL;
    my_args = NULL;
    add_console_msg("[Listen]",notification, "Thread has exited successfully");
    bThreadActive[0] = false;
    return NULL;
}
Example #29
-1
smcp_status_t
smcp_plat_process(
	smcp_t self
) {
	SMCP_EMBEDDED_SELF_HOOK;
	smcp_status_t ret = 0;

	int tmp;
	struct pollfd polls[4];
	int poll_count;

	poll_count = smcp_plat_update_pollfds(self, polls, sizeof(polls)/sizeof(polls[0]));

	if (poll_count > (int)(sizeof(polls)/sizeof(*polls))) {
		poll_count = sizeof(polls)/sizeof(*polls);
	}

	errno = 0;

	tmp = poll(polls, poll_count, 0);

	// Ensure that poll did not fail with an error.
	require_action_string(
		errno == 0,
		bail,
		ret = SMCP_STATUS_ERRNO,
		strerror(errno)
	);

	if(tmp > 0) {
		for (tmp = 0; tmp < poll_count; tmp++) {
			if (!polls[tmp].revents) {
				continue;
			} else {
				char packet[SMCP_MAX_PACKET_LENGTH+1];
				smcp_sockaddr_t remote_saddr = {};
				smcp_sockaddr_t local_saddr = {};
				ssize_t packet_len = 0;
				char cmbuf[0x100];
				struct iovec iov = { packet, SMCP_MAX_PACKET_LENGTH };
				struct msghdr msg = {
					.msg_name = &remote_saddr,
					.msg_namelen = sizeof(remote_saddr),
					.msg_iov = &iov,
					.msg_iovlen = 1,
					.msg_control = cmbuf,
					.msg_controllen = sizeof(cmbuf),
				};
				struct cmsghdr *cmsg;

				packet_len = recvmsg(polls[tmp].fd, &msg, 0);

				require_action(packet_len > 0, bail, ret = SMCP_STATUS_ERRNO);

				packet[packet_len] = 0;

				for (
					cmsg = CMSG_FIRSTHDR(&msg);
					cmsg != NULL;
					cmsg = CMSG_NXTHDR(&msg, cmsg)
				) {
					if (cmsg->cmsg_level != SMCP_IPPROTO
						|| cmsg->cmsg_type != SMCP_PKTINFO
					) {
						continue;
					}

					// Preinitialize some of the fields.
					local_saddr = remote_saddr;

#if SMCP_BSD_SOCKETS_NET_FAMILY==AF_INET6
					struct in6_pktinfo *pi = (struct in6_pktinfo *)CMSG_DATA(cmsg);
					local_saddr.smcp_addr = pi->ipi6_addr;
					local_saddr.sin6_scope_id = pi->ipi6_ifindex;

#elif SMCP_BSD_SOCKETS_NET_FAMILY==AF_INET
					struct in_pktinfo *pi = (struct in_pktinfo *)CMSG_DATA(cmsg);
					local_saddr.smcp_addr = pi->ipi_addr;
#endif

					local_saddr.smcp_port = htons(get_port_for_fd(polls[tmp].fd));

					self->plat.pktinfo = *pi;
				}

				smcp_set_current_instance(self);
				smcp_plat_set_remote_sockaddr(&remote_saddr);
				smcp_plat_set_local_sockaddr(&local_saddr);

				if (self->plat.fd_udp == polls[tmp].fd) {
					smcp_plat_set_session_type(SMCP_SESSION_TYPE_UDP);

					ret = smcp_inbound_packet_process(self, packet, (coap_size_t)packet_len, 0);
					require_noerr(ret, bail);

#if SMCP_DTLS
				} else if (self->plat.fd_dtls == polls[tmp].fd) {
					smcp_plat_set_session_type(SMCP_SESSION_TYPE_DTLS);
					smcp_plat_ssl_inbound_packet_process(
						self,
						packet,
						(coap_size_t)packet_len
					);
#endif
				}
			}
		}
	}

	smcp_handle_timers(self);

bail:
	smcp_set_current_instance(NULL);
	self->is_responding = false;
	return ret;
}

smcp_status_t
smcp_plat_lookup_hostname(const char* hostname, smcp_sockaddr_t* saddr, int flags)
{
	smcp_status_t ret;
	struct addrinfo hint = {
		.ai_flags		= AI_ADDRCONFIG,
		.ai_family		= AF_UNSPEC,
	};

	struct addrinfo *results = NULL;
	struct addrinfo *iter = NULL;

#if SMCP_BSD_SOCKETS_NET_FAMILY != AF_INET6
	hint.ai_family = SMCP_BSD_SOCKETS_NET_FAMILY;
#endif

	if ((flags & (SMCP_LOOKUP_HOSTNAME_FLAG_IPV4_ONLY|SMCP_LOOKUP_HOSTNAME_FLAG_IPV6_ONLY)) == (SMCP_LOOKUP_HOSTNAME_FLAG_IPV4_ONLY|SMCP_LOOKUP_HOSTNAME_FLAG_IPV6_ONLY)) {
		ret = SMCP_STATUS_INVALID_ARGUMENT;
		goto bail;
	} else if ((flags & SMCP_LOOKUP_HOSTNAME_FLAG_IPV4_ONLY) == SMCP_LOOKUP_HOSTNAME_FLAG_IPV4_ONLY) {
		hint.ai_family = AF_INET;
	} else if ((flags & SMCP_LOOKUP_HOSTNAME_FLAG_IPV6_ONLY) == SMCP_LOOKUP_HOSTNAME_FLAG_IPV6_ONLY) {
		hint.ai_family = AF_INET6;
	}

	memset(saddr, 0, sizeof(*saddr));
	saddr->___smcp_family = SMCP_BSD_SOCKETS_NET_FAMILY;

#if SOCKADDR_HAS_LENGTH_FIELD
	saddr->___smcp_len = sizeof(*saddr);
#endif

	int error = getaddrinfo(hostname, NULL, &hint, &results);

#if SMCP_BSD_SOCKETS_NET_FAMILY==AF_INET6
	if(error && (inet_addr(hostname) != INADDR_NONE)) {
		char addr_v4mapped_str[8 + strlen(hostname)];
		hint.ai_family = AF_INET6;
		hint.ai_flags = AI_ALL | AI_V4MAPPED;
		strcpy(addr_v4mapped_str,"::ffff:");
		strcat(addr_v4mapped_str,hostname);
		error = getaddrinfo(addr_v4mapped_str,
			NULL,
			&hint,
			&results
		);
	}
#endif

	if (EAI_AGAIN == error) {
		ret = SMCP_STATUS_WAIT_FOR_DNS;
		goto bail;
	}

#ifdef TM_EWOULDBLOCK
	if (TM_EWOULDBLOCK == error) {
		ret = SMCP_STATUS_WAIT_FOR_DNS;
		goto bail;
	}
#endif

	require_action_string(
		!error,
		bail,
		ret = SMCP_STATUS_HOST_LOOKUP_FAILURE,
		gai_strerror(error)
	);

	// Move to the first recognized result
	for(iter = results;iter && (iter->ai_family!=AF_INET6 && iter->ai_family!=AF_INET);iter=iter->ai_next);

	require_action(
		iter,
		bail,
		ret = SMCP_STATUS_HOST_LOOKUP_FAILURE
	);

#if SMCP_BSD_SOCKETS_NET_FAMILY==AF_INET6
	if(iter->ai_family == AF_INET) {
		struct sockaddr_in *v4addr = (void*)iter->ai_addr;
		saddr->sin6_addr.s6_addr[10] = 0xFF;
		saddr->sin6_addr.s6_addr[11] = 0xFF;
		memcpy(&saddr->sin6_addr.s6_addr[12], &v4addr->sin_addr.s_addr, 4);
	} else
#endif
	if(iter->ai_family == SMCP_BSD_SOCKETS_NET_FAMILY) {
		memcpy(saddr, iter->ai_addr, iter->ai_addrlen);
	}

	if(SMCP_IS_ADDR_MULTICAST(&saddr->smcp_addr)) {
		smcp_t const self = smcp_get_current_instance();
		check(self->outbound.packet->tt != COAP_TRANS_TYPE_CONFIRMABLE);
		if(self->outbound.packet->tt == COAP_TRANS_TYPE_CONFIRMABLE) {
			self->outbound.packet->tt = COAP_TRANS_TYPE_NONCONFIRMABLE;
		}
	}

	ret = SMCP_STATUS_OK;

bail:
	if(results)
		freeaddrinfo(results);
	return ret;
}
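
smcp_plat_process() above recovers the local destination address of each datagram from the packet-info control message that recvmsg() delivers. A minimal sketch of that technique for a plain IPv6 UDP socket, assuming Linux/RFC 3542 socket options rather than the SMCP_IPPROTO/SMCP_PKTINFO abstractions used above (helper names are illustrative):

#define _GNU_SOURCE		/* for struct in6_pktinfo on glibc */
#include <netinet/in.h>
#include <string.h>
#include <sys/socket.h>

/* Illustrative: ask the kernel to attach IPV6_PKTINFO to every received
 * datagram so recvmsg() can report which local address it arrived on. */
static int enable_pktinfo(int fd)
{
	int on = 1;

	return setsockopt(fd, IPPROTO_IPV6, IPV6_RECVPKTINFO, &on, sizeof(on));
}

/* Illustrative: pull the destination address out of the control messages
 * that recvmsg() filled in (msg_control/msg_controllen must be set). */
static int local_addr_from_msg(struct msghdr *msg, struct in6_addr *dst)
{
	struct cmsghdr *cmsg;

	for (cmsg = CMSG_FIRSTHDR(msg); cmsg != NULL; cmsg = CMSG_NXTHDR(msg, cmsg)) {
		if (cmsg->cmsg_level == IPPROTO_IPV6 && cmsg->cmsg_type == IPV6_PKTINFO) {
			struct in6_pktinfo *pi = (struct in6_pktinfo *)CMSG_DATA(cmsg);

			memcpy(dst, &pi->ipi6_addr, sizeof(*dst));
			return 0;
		}
	}
	return -1;	/* no packet-info control message was attached */
}
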
Example #30
-12
bool init_serv()
{
    if(!bServerOnline)  //Make sure it is offline.
    {
        activate_console();
        gtk_widget_set_sensitive(start_and_stop_button, false); /*Gray it out*/

        init_args xml_args = load_external_files();
        if(!xml_args.bSuccess)
        {
            gtk_widget_set_sensitive(start_and_stop_button, true);
            add_console_msg("[Server]",error,"has ended init_serv() process");
            return false; //End server start up right away
        }

        bool bErrorFree = true;

        for(unsigned int i=0; i<sizeof(bThreadActive)/sizeof(bool); i++) //Start all threads false
        {
            bThreadActive[i] = false;

        }

        int iSocketTCP = socket(AF_INET, SOCK_STREAM, 0); //socket() returns -1 on failure, so check it as a signed value
        if(iSocketTCP < 0)
        {
            perror("tcp-socket");
            add_console_msg("[Init]",error,"Could not initialize TCP socket");
            return false;
        }
        uiMasterSocketTCP = iSocketTCP;

        int iSocketUDP = socket(AF_INET, SOCK_DGRAM, 0);
        if(iSocketUDP < 0)
        {
            perror("udp-socket");
            add_console_msg("[Init]",error,"Could not initialize UDP socket");
            return false;
        }
        uiMasterSocketUDP = iSocketUDP;

        memset(&SBindTCPAddress, 0, sizeof(SBindTCPAddress)); //Memset to make sure
        memset(&SBindUDPAddress, 0, sizeof(SBindUDPAddress)); //Memset to make sure

        SBindTCPAddress.sin_family = AF_INET;
        SBindUDPAddress.sin_family = AF_INET;
        SBindTCPAddress.sin_port = htons(xml_args.uiPortTCP);
        SBindUDPAddress.sin_port = htons(xml_args.uiPortUDP);
        SBindTCPAddress.sin_addr.s_addr = INADDR_ANY;
        SBindUDPAddress.sin_addr.s_addr = INADDR_ANY;



        if(bind(uiMasterSocketTCP, (struct sockaddr*) &SBindTCPAddress, sizeof SBindTCPAddress) < 0)
        {
            perror("tcp-bind"); //Perrors are only for us to debug
            add_console_msg("[Init]",error, "Could not bind TCP"); //These are for the end-user
            bErrorFree = false;
            return false;
        }
        if(bind(uiMasterSocketUDP, (struct sockaddr*) &SBindUDPAddress, sizeof SBindUDPAddress)  < 0)
        {
            perror("udp-bind");
            add_console_msg("[Init]",error, "Could not bind UDP");
            bErrorFree = false;
            return false;
        }
        if(listen(uiMasterSocketTCP, xml_args.uiMaxQueue) < 0)
        {
            perror("tcp-listen");
            add_console_msg("[Init]",error, "Could not listen on TCP socket");
            bErrorFree = false;
            return false;
        }

        bServerOnline = bErrorFree;
        if(bErrorFree)
        {


            /*listen thread*/
            thread_args listen_thread_args;
            pthread_create(&listen_thread, NULL, listen_thread_func, &listen_thread_args);

            /*flow thread*/
            thread_args flow_thread_args;
            pthread_create(&flow_thread, NULL, flow_thread_func, &flow_thread_args);

            thread_args save_thread_args;
            pthread_create(&save_thread, NULL, save_thread_func, &save_thread_args);

            while(true)
            {
                poll(0,0, CPU_DELAY);
                if(check_if_all_threads_running())
                {
                    gtk_widget_set_sensitive(start_and_stop_button, true); /*ungray*/
                    gtk_button_set_label((GtkButton*)start_and_stop_button, "Stop");
                    g_signal_handler_disconnect(start_and_stop_button, start_stop_button_ls_id);
                    start_stop_button_ls_id = g_signal_connect_after(start_and_stop_button, "released", G_CALLBACK(stop_serv), NULL);
                    change_status_msg("<span foreground='green'><b>Online</b></span> <b>0</b> user(s) connected");
                    add_console_msg("[Server]", important_online, "[Server is now online]");
                    break;
                }

            }


            return true;
        }
    }
    return false;
}