Example No. 1
Widget::Widget(QWidget *parent) :
    QWidget(parent),
    ui(new Ui::Widget)
{
    ui->setupUi(this);
    this->resize(275, 600);
    choosew = new ChooseWidget(this);
    //HeadButton *h = new HeadButton(this);
    /*
    list = new QListWidget(this);
    list->resize(200, 600);
    FriendWidget *f = new FriendWidget();
    QListWidgetItem *i = new QListWidgetItem();
    i->setSizeHint(QSize(200, 50));
    list->addItem(i);
    list->setItemWidget(i, f);

    tree = new QTreeWidget(this);
    tree->resize(275, 600);
    tree->move(200, 0);
    QStringList str;
    str << QObject::tr("张三");
    QTreeWidgetItem *treeitem = new QTreeWidgetItem(tree, str);
    QTreeWidgetItem *child1 = new QTreeWidgetItem(treeitem, str);
    treeitem->addChild(child1);
    */
    createTray();

    chat_server = new QTcpServer(this);
    chat_server->listen(QHostAddress::Any, 55554);
    connect(chat_server, SIGNAL(newConnection()), this, SLOT(receive_connection()));
    connect(choosew, SIGNAL(to_up_msg(QString,QString)), this, SLOT(send_msg(QString, QString)));
    connect(choosew, SIGNAL(new_conn(QString, QString)), this, SLOT(new_conn(QString, QString)));
    connect(this, SIGNAL(rec_msg(QString,QString)), choosew, SLOT(from_up_msg(QString,QString)));
}
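The receive_connection() slot wired to newConnection() above is not part of this snippet. A minimal sketch of what such a slot could look like, assuming each peer sends a single "<name> <text>" line that should be forwarded through the rec_msg signal (the framing and parsing here are purely illustrative):

// Hypothetical receive_connection() slot; the real one is not shown in this example.
void Widget::receive_connection()
{
    QTcpSocket *socket = chat_server->nextPendingConnection();
    connect(socket, &QTcpSocket::readyRead, this, [this, socket]() {
        // Assume one "<peer> <text>" line per message (illustrative framing only).
        const QString line = QString::fromUtf8(socket->readAll()).trimmed();
        emit rec_msg(line.section(' ', 0, 0), line.section(' ', 1));
    });
    connect(socket, &QTcpSocket::disconnected, socket, &QObject::deleteLater);
}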
Example No. 2
/* This gets called by glib whenever data is received from the fifo */
static gboolean fifo_cb(GIOChannel *ch, GIOCondition condition, gpointer data)
{
#define BUF_SIZE 1024
  gsize len, tp;
  gchar *buf, *tmp, *all = NULL;
  GIOStatus rv;

  do {
    GError *err = NULL;
    rv = g_io_channel_read_line(ch, &buf, &len, &tp, &err);
    if(buf) {
      if(tp) {
        buf[tp]='\0';
      }
      new_conn(buf, (GlobalInfo*)data);
      g_free(buf);
    }
    else {
      buf = g_malloc(BUF_SIZE + 1);
      while(TRUE) {
        buf[BUF_SIZE]='\0';
        g_io_channel_read_chars(ch, buf, BUF_SIZE, &len, &err);
        if(len) {
          buf[len]='\0';
          if(all) {
            tmp = all;
            all = g_strdup_printf("%s%s", tmp, buf);
            g_free(tmp);
          }
          else {
            all = g_strdup(buf);
          }
        }
        else {
          break;
        }
      }
      if(all) {
        new_conn(all, (GlobalInfo*)data);
        g_free(all);
      }
      g_free(buf);
    }
    if(err) {
      g_error("fifo_cb: %s", err->message);
      g_free(err);
      break;
    }
  } while((len) && (rv == G_IO_STATUS_NORMAL));
  return TRUE;
}
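For reference, a GIOFunc with this signature is normally attached to the fifo with g_io_add_watch(). A minimal sketch, in which the fifo path "hiper.fifo" and the GlobalInfo pointer g are assumptions rather than part of the example:

/* Sketch: open the fifo and watch it so fifo_cb runs whenever data arrives. */
static void setup_fifo_watch(GlobalInfo *g)
{
  GError *err = NULL;
  GIOChannel *ch = g_io_channel_new_file("hiper.fifo", "r", &err);  /* assumed path */
  if(!ch)
    g_error("failed to open fifo: %s", err->message);
  g_io_add_watch(ch, G_IO_IN, fifo_cb, g);  /* G_IO_IN: data available to read */
}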
Example No. 3
  /// Constructor opens the acceptor and starts waiting for the first incoming
  /// connection.
  server(asio::io_context& io_context, unsigned short port)
    : acceptor_(io_context,
        asio::ip::tcp::endpoint(asio::ip::tcp::v4(), port))
  {
    // Create the data to be sent to each client.
    stock s;
    s.code = "ABC";
    s.name = "A Big Company";
    s.open_price = 4.56;
    s.high_price = 5.12;
    s.low_price = 4.33;
    s.last_price = 4.98;
    s.buy_price = 4.96;
    s.buy_quantity = 1000;
    s.sell_price = 4.99;
    s.sell_quantity = 2000;
    stocks_.push_back(s);
    s.code = "DEF";
    s.name = "Developer Entertainment Firm";
    s.open_price = 20.24;
    s.high_price = 22.88;
    s.low_price = 19.50;
    s.last_price = 19.76;
    s.buy_price = 19.72;
    s.buy_quantity = 34000;
    s.sell_price = 19.85;
    s.sell_quantity = 45000;
    stocks_.push_back(s);

    // Start an accept operation for a new connection.
    connection_ptr new_conn(new connection(acceptor_.get_io_context()));
    acceptor_.async_accept(new_conn->socket(),
        boost::bind(&server::handle_accept, this,
          asio::placeholders::error, new_conn));
  }
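A typical driver for this class is a small main() that constructs the server and runs the io_context until all handlers have completed. A sketch assuming standalone Asio, with the port number chosen arbitrarily:

// Sketch of a main() driving the server above (port 12345 is an arbitrary choice).
#include <asio.hpp>
#include <iostream>

int main()
{
  try {
    asio::io_context io_context;
    server s(io_context, 12345);   // the constructor queues the first async_accept
    io_context.run();              // dispatch accept/write handlers until none remain
  } catch (std::exception& e) {
    std::cerr << "server: " << e.what() << std::endl;
  }
  return 0;
}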
Example No. 4
int main(int argc, char **argv)
{
  GlobalInfo g;
  CURLMcode rc;

  (void)argc;
  (void)argv;

  memset(&g, 0, sizeof(GlobalInfo));
  g.multi = curl_multi_init();

  curl_multi_setopt(g.multi, CURLMOPT_SOCKETFUNCTION, sock_cb);
  curl_multi_setopt(g.multi, CURLMOPT_SOCKETDATA, &g);
  curl_multi_setopt(g.multi, CURLMOPT_TIMERFUNCTION, multi_timer_cb);
  curl_multi_setopt(g.multi, CURLMOPT_TIMERDATA, &g);

  new_conn((char *)"www.google.com", &g);  /* add a URL */

  /* enter io_service run loop */
  io_service.run();

  curl_multi_cleanup(g.multi);

  fprintf(MSG_OUT, "\ndone.\n");

  return 0;
}
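new_conn() itself is not shown here; in libcurl's multi-socket samples it essentially creates an easy handle for the URL and adds it to the multi handle, which in turn fires the timer and socket callbacks registered above. A rough sketch under that assumption (the real helper typically also installs write callbacks and per-transfer bookkeeping):

/* Sketch of a new_conn() compatible with the call above. */
static void new_conn(char *url, GlobalInfo *g)
{
  CURL *easy = curl_easy_init();
  if(!easy) {
    fprintf(MSG_OUT, "curl_easy_init() failed\n");
    return;
  }
  curl_easy_setopt(easy, CURLOPT_URL, url);
  curl_easy_setopt(easy, CURLOPT_PRIVATE, g);   /* retrievable via CURLINFO_PRIVATE */
  curl_multi_add_handle(g->multi, easy);        /* starts the transfer asynchronously */
}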
Example No. 5
  /// Handle completion of an accept operation.
  void handle_accept(const asio::error_code& e, connection_ptr conn)
  {
    if (!e)
    {
      // Successfully accepted a new connection. Send the list of stocks to the
      // client. The connection::async_write() function will automatically
      // serialize the data structure for us.
      conn->async_write(stocks_,
          boost::bind(&server::handle_write, this,
            asio::placeholders::error, conn));

      // Start an accept operation for a new connection.
      connection_ptr new_conn(new connection(acceptor_.io_service()));
      acceptor_.async_accept(new_conn->socket(),
          boost::bind(&server::handle_accept, this,
            asio::placeholders::error, new_conn));
    }
    else
    {
      // An error occurred. Log it and return. Since we are not starting a new
      // accept operation the io_service will run out of work to do and the
      // server will exit.
      std::cerr << e.message() << std::endl;
    }
  }
Example No. 6
static int run(int fdServerSock)
{
    int ret;
    struct pollfd pfd;
    pfd.fd = fdServerSock;
    pfd.events = POLLIN;
    while (s_run)
    {
        ret = poll(&pfd, 1, 1000);
        if (ret == 1)
        {
            int fd = accept(fdServerSock, NULL, NULL);
            if (fd != -1)
                new_conn(fd);
            else
                perror("lscgid: accept() failed");
        }
        else
        {
            if (getppid() == 1)
                return 1;
        }
    }
    return 0;
}
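The loop stops when s_run is cleared or when the parent process disappears (getppid() == 1). How s_run is cleared lies outside this snippet; one plausible arrangement, sketched here, is a signal handler installed before run() is entered:

/* Sketch only: a conventional way the s_run flag could be cleared on shutdown.
   Assumes s_run is (or mirrors) the flag tested by run() above. */
#include <signal.h>

static volatile sig_atomic_t s_run = 1;

static void stop_handler(int sig)
{
    (void)sig;
    s_run = 0;               /* run() notices this within one poll() timeout */
}

/* before calling run(fdServerSock):
       signal(SIGTERM, stop_handler);
       signal(SIGINT,  stop_handler);  */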
Example No. 7
File: sigyn.c Project: alyx/sigyn
int main(int argc, char *argv[])
{
    me.ev = mowgli_eventloop_create();

    signals_init();

    parse_commandline_options(argc, argv);

    me.config = mowgli_config_file_load(config_file);

    if(me.config == NULL)
        sigyn_fatal("Cannot load configuration file.");
    logger_init(me.config->entries);
    config_check(me.config);

    me.uplink.line = new_conn(me.uplink.hostname, me.uplink.port, me.uplink.ssl, read_irc, NULL);
    if (me.uplink.line == NULL)
        sigyn_fatal("Connection to uplink failed.");
    me.uplink.connected = true;

    loadmodules(me.config->entries);

    sigyn_introduce_client(me.client->nick, me.client->user, NULL);
    if (should_fork)
        daemonise(SYSCONFDIR "/sigyn.pid");
    mowgli_eventloop_run(me.ev);

    sigyn_cleanup();
    return EXIT_SUCCESS;
}
Example No. 8
void add_first_call(arguments *arg, global_info *global) {
    /* Copy the main url */
    size_t root_url_len = strnlen(arg->url, MAX_URL_LEN + 10);
    if (root_url_len > MAX_URL_LEN) {
        orcerror("%s : url is larger than defacto limit %d\n", arg->url);
        exit(EXIT_FAILURE);
    }
    char *root_url = calloc(root_url_len + 1, sizeof(char));
    if (0 == root_url) {
        orcerror("%s (%d)\n", strerror(errno), errno);
        exit(EXIT_FAILURE);
    }
    /* Create a info value for the main url */
    url_info *info = calloc(1, sizeof(*info));
    if (0 == info) {
        orcerror("%s (%d)\n", strerror(errno), errno);
        exit(EXIT_FAILURE);
    }
    strncpy(root_url, arg->url, root_url_len);
    root_url[root_url_len] = '\0';
    /* Add a connection to the url where we will start the spider */
    new_conn(root_url, global);
    /* root_url and info are freed in bintree_free */
    bintree_add(&(global->url_tree), root_url, info);
}
Example No. 9
unsigned agent_impl::connect(const char* ipaddr, unsigned short port, 
				 int32_t timeout_millis, unsigned short local_port)
{
	connection_ptr new_conn(new connection(*this, get_io_service(), net_callback_));
	add_connection(new_conn);
	new_conn->async_connect(ipaddr, port, timeout_millis, local_port);
	return new_conn->get_id();
}
Example No. 10
void on_connect(SOCK s,void*ud,int err)
{
    if(s != INVALID_SOCK) {
        struct connection * con = new_conn(s,0);
        add_client(con);
        bind2engine((ENGINE)ud,con,on_process_packet,remove_client);
    }
}
Example No. 11
void ChooseConnectionDlg::on_actionClone_triggered()
{
    if (!m_current)
        return;

    ConnectionPointer<Connection> new_conn(m_current->clone());
    sConMgr2.addUserOwnedConn(new_conn.data());
    this->focusNewConn(new_conn.take());
}
Example No. 12
int main(int argc, char **argv)
{
	if (!new_conn(argv[1], argv[2], echo_start, NULL))
		fatal("failed to create a conn to %s %s!\n", argv[1], argv[2]);

	while (1)
		conn_loop ();
	return 0;
}
Example No. 13
void accept_client(SOCK s,void*ud)
{
	struct connection *c = new_conn(s,0);
	add_client(c);
	struct netservice *tcpserver = (struct netservice *)ud;
	tcpserver->bind(tcpserver,c,on_process_packet,remove_client
					,5000,c_recv_timeout,5000,c_send_timeout
					);
}
Example No. 14
	/// handler called after an accept completes
	void handle_accept(const boost::system::error_code& e, connection_ptr conn)	{
		if (!e)	{	// accept succeeded
			// read the information sent by the client
			conn->async_read(infos, boost::bind(&server::handle_read, this, boost::asio::placeholders::error, conn));
		}
		// start accepting a new connection
		connection_ptr new_conn(new connection(acceptor_.get_io_service()));
		acceptor_.async_accept(new_conn->socket(), boost::bind(&server::handle_accept, this, boost::asio::placeholders::error, new_conn));

	}
Example No. 15
int main(int argc, char **argv) {
    std::cout << std::endl << __PRETTY_FUNCTION__ << " called" << std::endl;
    GlobalInfo g;

    (void) argc;
    (void) argv;

    memset(&g, 0, sizeof(GlobalInfo));
    g.multi = curl_multi_init();

    // 1 cb for socket
    curl_multi_setopt(g.multi, CURLMOPT_SOCKETFUNCTION, sock_cb);
    curl_multi_setopt(g.multi, CURLMOPT_SOCKETDATA, &g);

    // 2 timer cb
    curl_multi_setopt(g.multi, CURLMOPT_TIMERFUNCTION, multi_timer_cb);
    curl_multi_setopt(g.multi, CURLMOPT_TIMERDATA, &g);

    new_conn((char *) "www.google.com", &g); /* add a URL */
    new_conn((char *) "www.yaoop1.com", &g); /* add a URL */
    new_conn((char *) "www.yahoo.com", &g); /* add a URL */
    new_conn((char *) "www.google.com", &g); /* add a URL */
    new_conn((char *) "www.yahoo.com", &g); /* add a URL */
    new_conn((char *) "www.yahoo.com", &g); /* add a URL */
    new_conn((char *) "www.yahoo.com", &g); /* add a URL */
    new_conn((char *) "www.google.com", &g); /* add a URL */
    new_conn((char *) "www.google.com", &g); /* add a URL */
    new_conn((char *) "www.google.com", &g); /* add a URL */

    /* enter io_service run loop */
    io_service.run();

    curl_multi_cleanup(g.multi);
    fprintf(MSG_OUT, "\ndone.\n");

    return 0;
}
Example No. 16
void new_connection(SOCK sock,struct sockaddr_in *addr_remote,void *ud)
{
	struct connection *c = new_conn(sock,1);
    c->cb_disconnect = asyncb_disconnect;
    asynsock_t d = asynsock_new(c,INVALID_SOCK);
	msgque_t mq =  (msgque_t)ud;
	struct msg_connection *msg = calloc(1,sizeof(*msg));
	msg->base._ident = TO_IDENT(d->sident);
    msg->base.type = MSG_ONCONNECT;
	get_addr_remote(d->sident,msg->ip,32);
	get_port_remote(d->sident,&msg->port);

    if(0 != msgque_put_immeda(mq,(lnode*)msg)){
        asynsock_release(d);
		free(msg);
	}
}
Example No. 17
static void read_new_pages(global_info *global) {
    int max_count = global->job_max;
    char* url = 0;
    while (global->job_count <= max_count && 0 != (url = url_get(global))) {
        url_info *info = 0;
        if (0 != (info = (url_info *)bintree_find(&(global->url_tree), url))) {
            info->found_count++;
            orcstatus(orcm_debug, orc_cyan, "counted", "%s\n", url);
            free(url);
        } else {
            info = calloc(1, sizeof(*info));
            info->found_count++;
            bintree_add(&(global->url_tree), url, info);
            new_conn(url, global);
        }
    }
}
Example No. 18
void on_connect(SOCK s,struct sockaddr_in *addr_remote, void *ud,int err)
{
    if(s != INVALID_SOCK){
        struct connection * con = new_conn(s,0);
        struct netservice *tcpclient = (struct netservice *)ud;
		tcpclient->bind(tcpclient,con,65536,on_process_packet,NULL
						,0,NULL,0,NULL);
        // send the login request
        
        wpacket_t wpk = NEW_WPK(64);
        wpk_write_uint16(wpk,CMD_C2GATE_LOGIN);
        wpk_write_string(wpk,"huangwei");
        wpk_write_string(wpk,"198272");
        send_packet(con,wpk);
        //wpk_write_binary(wpk,(void*)msg,send_size);
        //send_packet(con,wpk);
    }
}
Example No. 19
/* This gets called whenever data is received from the fifo */
static void fifo_cb(int fd, short event, void *arg)
{
  char s[1024];
  long int rv=0;
  int n=0;
  GlobalInfo *g = (GlobalInfo *)arg;
  (void)fd; /* unused */
  (void)event; /* unused */

  do {
    s[0]='\0';
    rv=fscanf(g->input, "%1023s%n", s, &n);
    s[n]='\0';
    if ( n && s[0] ) {
      new_conn(s,arg);  /* if we read a URL, go get it! */
    } else break;
  } while ( rv != EOF);
}
Example No. 20
  /// Handle completion of an accept operation.
  void handle_accept(const asio::error_code& e, connection_ptr conn)
  {
    if (!e)
    {
      // Successfully accepted a new connection. Send the list of stocks to the
      // client. The connection::async_write() function will automatically
      // serialize the data structure for us.
      conn->async_write(stocks_,
          boost::bind(&server::handle_write, this,
            asio::placeholders::error, conn));
    }

    // Start an accept operation for a new connection.
    connection_ptr new_conn(new connection(acceptor_.get_io_context()));
    acceptor_.async_accept(new_conn->socket(),
        boost::bind(&server::handle_accept, this,
          asio::placeholders::error, new_conn));
  }
Example No. 21
/* This gets called whenever data is received from the fifo */
static void fifo_cb(EV_P_ struct ev_io *w, int revents)
{
  char s[1024];
  long int rv=0;
  int n=0;
  GlobalInfo *g = (GlobalInfo *)w->data;

  do
  {
    s[0]='\0';
    rv=fscanf(g->input, "%1023s%n", s, &n);
    s[n]='\0';
    if ( n && s[0] )
    {
      new_conn(s,g);  /* if we read a URL, go get it! */
    } else break;
  } while ( rv != EOF );
}
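This variant uses libev, so a callback with this signature is hooked up through an ev_io watcher. A minimal sketch, in which the fifo_event field on GlobalInfo and the already-opened fifo descriptor are assumptions:

/* Sketch: start an ev_io watcher so fifo_cb fires whenever the fifo is readable. */
static void setup_fifo_watch(EV_P_ GlobalInfo *g, int fifo_fd)
{
  ev_io_init(&g->fifo_event, fifo_cb, fifo_fd, EV_READ);
  g->fifo_event.data = g;              /* fifo_cb retrieves GlobalInfo via w->data */
  ev_io_start(EV_A_ &g->fifo_event);
}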
Example No. 22
/*
  called when a listening socket becomes readable.
*/
static void prefork_accept_connection(
	struct tevent_context *ev,
	struct loadparm_context *lp_ctx,
	struct socket_context *listen_socket,
	void (*new_conn)(struct tevent_context *,
			struct loadparm_context *,
			struct socket_context *,
			struct server_id,
			void *,
			void *),
	void *private_data,
	void *process_context)
{
	NTSTATUS status;
	struct socket_context *connected_socket;
	pid_t pid = getpid();

	/* accept an incoming connection. */
	status = socket_accept(listen_socket, &connected_socket);
	if (!NT_STATUS_IS_OK(status)) {
		/*
		 * For prefork we can ignore STATUS_MORE_ENTRIES: once a
		 * connection becomes available all waiting processes are
		 * woken, but only one gets work to process (AKA the
		 * thundering herd).
		 * In the short term this should not be an issue, as the number
		 * of workers should be a small multiple of the number of CPUs.
		 * In the longer term socket_accept needs to implement a
		 * mutex/semaphore (like Apache does) to serialise the accepts.
		 */
		if (!NT_STATUS_EQUAL(status, STATUS_MORE_ENTRIES)) {
			DBG_ERR("Worker process (%d), error in accept [%s]\n",
				getpid(), nt_errstr(status));
		}
		return;
	}

	talloc_steal(private_data, connected_socket);

	new_conn(ev, lp_ctx, connected_socket,
		 cluster_id(pid, socket_get_fd(connected_socket)),
		 private_data, process_context);
}
Example No. 23
		void	tcp_server::on_new_conn(SOCKET_FD fd, const endpoint_v4& addr)
		{
			
			io_service* ios_ptr = m_service_pool.next_service();
			connection_ptr new_conn(new connection(*ios_ptr,fd,addr));

			new_conn->set_close_callback(boost::bind(&tcp_server::on_remove_conn,this,_1));
			new_conn->set_read_callback(m_read_cb);
			new_conn->set_write_callback(m_write_cb);
			new_conn->set_conn_callback(m_conn_cb);
			new_conn->set_error_callback(m_err_cb);
			// 
			m_conn_map[fd]=new_conn;
			
			//LOG_INFO.stream()<<"new conn form :"<< addr.get_ip() << " : " << addr.get_port();
			
			/// notify connection
			new_conn->start();
		}
Example No. 24
/*
  called when a listening socket becomes readable. 
*/
static void single_accept_connection(struct tevent_context *ev, 
				     struct loadparm_context *lp_ctx,
				     struct socket_context *listen_socket,
				     void (*new_conn)(struct tevent_context *, 
						      struct loadparm_context *,
						      struct socket_context *, 
						      struct server_id , void *), 
				     void *private_data)
{
	NTSTATUS status;
	struct socket_context *connected_socket;
	pid_t pid = getpid();

	/* accept an incoming connection. */
	status = socket_accept(listen_socket, &connected_socket);
	if (!NT_STATUS_IS_OK(status)) {
		DEBUG(0,("single_accept_connection: accept: %s\n", nt_errstr(status)));
		/* this looks strange, but is correct. 

		   We can only be here if woken up from select, due to
		   an incoming connection.

		   We need to throttle things until the system clears
		   enough resources to handle this new socket. 

		   If we don't then we will spin filling the log and
		   causing more problems. We don't panic as this is
		   probably a temporary resource constraint */
		sleep(1);
		return;
	}

	talloc_steal(private_data, connected_socket);

	/*
	 * We use the PID so we cannot collide with cluster ids
	 * generated in other single mode tasks, and won't collide
	 * with PIDs from the standard process model, because the
	 * combination of pid/fd should be unique system-wide
	 */
	new_conn(ev, lp_ctx, connected_socket,
		 cluster_id(pid, socket_get_fd(connected_socket)), private_data);
}
Example No. 25
/*
  called when a listening socket becomes readable.
*/
static void onefork_accept_connection(struct tevent_context *ev,
				      struct loadparm_context *lp_ctx,
				      struct socket_context *listen_socket,
				       void (*new_conn)(struct tevent_context *,
							struct loadparm_context *, struct socket_context *,
							struct server_id , void *),
				       void *private_data)
{
	NTSTATUS status;
	struct socket_context *connected_socket;
	pid_t pid = getpid();

	/* accept an incoming connection. */
	status = socket_accept(listen_socket, &connected_socket);
	if (!NT_STATUS_IS_OK(status)) {
		return;
	}

	talloc_steal(private_data, connected_socket);

	new_conn(ev, lp_ctx, connected_socket, cluster_id(pid, socket_get_fd(connected_socket)), private_data);
}
Example No. 26
static struct fuse_conn *get_conn(struct file *file, struct super_block *sb)
{
	struct fuse_conn *fc;

	if (file->f_op != &fuse_dev_operations)
		return ERR_PTR(-EINVAL);
	fc = new_conn();
	if (fc == NULL)
		return ERR_PTR(-ENOMEM);
	spin_lock(&fuse_lock);
	if (file->private_data) {
		free_conn(fc);
		fc = ERR_PTR(-EINVAL);
	} else {
		file->private_data = fc;
		*get_fuse_conn_super_p(sb) = fc;
		fc->mounted = 1;
		fc->connected = 1;
		fc->count = 2;
	}
	spin_unlock(&fuse_lock);
	return fc;
}
Example No. 27
static int fuse_fill_super(struct super_block *sb, void *data, int silent)
{
	struct fuse_conn *fc;
	struct inode *root;
	struct fuse_mount_data d;
	struct file *file;
	struct dentry *root_dentry;
	struct fuse_req *init_req;
	int err;
	int is_bdev = sb->s_bdev != NULL;

	if (sb->s_flags & MS_MANDLOCK)
		return -EINVAL;

	if (!parse_fuse_opt((char *) data, &d, is_bdev))
		return -EINVAL;

	if (is_bdev) {
#ifdef CONFIG_BLOCK
		if (!sb_set_blocksize(sb, d.blksize))
			return -EINVAL;
#endif
	} else {
		sb->s_blocksize = PAGE_CACHE_SIZE;
		sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
	}
	sb->s_magic = FUSE_SUPER_MAGIC;
	sb->s_op = &fuse_super_operations;
	sb->s_maxbytes = MAX_LFS_FILESIZE;
	sb->s_export_op = &fuse_export_operations;

	file = fget(d.fd);
	if (!file)
		return -EINVAL;

	if (file->f_op != &fuse_dev_operations)
		return -EINVAL;

	fc = new_conn(sb);
	if (!fc)
		return -ENOMEM;

	fc->flags = d.flags;
	fc->user_id = d.user_id;
	fc->group_id = d.group_id;
	fc->max_read = max_t(unsigned, 4096, d.max_read);

	/* Used by get_root_inode() */
	sb->s_fs_info = fc;

	err = -ENOMEM;
	root = get_root_inode(sb, d.rootmode);
	if (!root)
		goto err;

	root_dentry = d_alloc_root(root);
	if (!root_dentry) {
		iput(root);
		goto err;
	}

	init_req = fuse_request_alloc();
	if (!init_req)
		goto err_put_root;

	if (is_bdev) {
		fc->destroy_req = fuse_request_alloc();
		if (!fc->destroy_req)
			goto err_free_init_req;
	}

	mutex_lock(&fuse_mutex);
	err = -EINVAL;
	if (file->private_data)
		goto err_unlock;

	err = fuse_ctl_add_conn(fc);
	if (err)
		goto err_unlock;

	list_add_tail(&fc->entry, &fuse_conn_list);
	sb->s_root = root_dentry;
	fc->connected = 1;
	file->private_data = fuse_conn_get(fc);
	mutex_unlock(&fuse_mutex);
	/*
	 * atomic_dec_and_test() in fput() provides the necessary
	 * memory barrier for file->private_data to be visible on all
	 * CPUs after this
	 */
	fput(file);

	fuse_send_init(fc, init_req);

	return 0;

 err_unlock:
	mutex_unlock(&fuse_mutex);
 err_free_init_req:
	fuse_request_free(init_req);
 err_put_root:
	dput(root_dentry);
 err:
	fput(file);
	fuse_conn_put(fc);
	return err;
}
Example No. 28
	// constructor
	server(boost::asio::io_service& io_service, unsigned short port)
: acceptor_(io_service, boost::asio::ip::tcp::endpoint(boost::asio::ip::tcp::v4(), port)) {
		// accept a new connection
		connection_ptr new_conn(new connection(acceptor_.get_io_service()));
		acceptor_.async_accept(new_conn->socket(), boost::bind(&server::handle_accept, this, boost::asio::placeholders::error, new_conn));
	}
Example No. 29
/*
  called when a listening socket becomes readable. 
*/
static void standard_accept_connection(struct tevent_context *ev, 
				       struct loadparm_context *lp_ctx,
				       struct socket_context *sock, 
				       void (*new_conn)(struct tevent_context *,
							struct loadparm_context *, struct socket_context *, 
							struct server_id , void *), 
				       void *private_data)
{
	NTSTATUS status;
	struct socket_context *sock2;
	pid_t pid;
	struct socket_address *c, *s;
	struct standard_child_state *state;
	struct tevent_fd *fde = NULL;
	struct tevent_signal *se = NULL;

	state = setup_standard_child_pipe(ev, NULL);
	if (state == NULL) {
		return;
	}

	/* accept an incoming connection. */
	status = socket_accept(sock, &sock2);
	if (!NT_STATUS_IS_OK(status)) {
		DEBUG(0,("standard_accept_connection: accept: %s\n",
			 nt_errstr(status)));
		/* this looks strange, but is correct. We need to throttle things until
		   the system clears enough resources to handle this new socket */
		sleep(1);
		close(state->to_parent_fd);
		state->to_parent_fd = -1;
		TALLOC_FREE(state);
		return;
	}

	pid = fork();

	if (pid != 0) {
		close(state->to_parent_fd);
		state->to_parent_fd = -1;

		if (pid > 0) {
			state->pid = pid;
		} else {
			TALLOC_FREE(state);
		}

		/* parent or error code ... */
		talloc_free(sock2);
		/* go back to the event loop */
		return;
	}

	/* this leaves state->to_parent_fd open */
	TALLOC_FREE(state);

	pid = getpid();

	/* This is now the child code. We need a completely new event_context to work with */

	if (tevent_re_initialise(ev) != 0) {
		smb_panic("Failed to re-initialise tevent after fork");
	}

	/* this will free all the listening sockets and all state that
	   is not associated with this new connection */
	talloc_free(sock);

	/* we don't care if the dup fails, as it's only a select()
	   speed optimisation */
	socket_dup(sock2);
			
	/* tdb needs special fork handling */
	ldb_wrap_fork_hook();

	/* Must be done after a fork() to reset messaging contexts. */
	status = imessaging_reinit_all();
	if (!NT_STATUS_IS_OK(status)) {
		smb_panic("Failed to re-initialise imessaging after fork");
	}

	fde = tevent_add_fd(ev, ev, child_pipe[0], TEVENT_FD_READ,
		      standard_pipe_handler, NULL);
	if (fde == NULL) {
		smb_panic("Failed to add fd handler after fork");
	}

	if (child_pipe[1] != -1) {
		close(child_pipe[1]);
		child_pipe[1] = -1;
	}

	se = tevent_add_signal(ev,
				ev,
				SIGHUP,
				0,
				sighup_signal_handler,
				NULL);
	if (se == NULL) {
		smb_panic("Failed to add SIGHUP handler after fork");
	}

	se = tevent_add_signal(ev,
				ev,
				SIGTERM,
				0,
				sigterm_signal_handler,
				NULL);
	if (se == NULL) {
		smb_panic("Failed to add SIGTERM handler after fork");
	}

	/* setup the process title */
	c = socket_get_peer_addr(sock2, ev);
	s = socket_get_my_addr(sock2, ev);
	if (s && c) {
		setproctitle("conn c[%s:%u] s[%s:%u] server_id[%d]",
			     c->addr, c->port, s->addr, s->port, (int)pid);
	}
	talloc_free(c);
	talloc_free(s);

	/* setup this new connection.  Cluster ID is PID based for this process model */
	new_conn(ev, lp_ctx, sock2, cluster_id(pid, 0), private_data);

	/* we can't return to the top level here, as that event context is gone,
	   so we now process events in the new event context until there are no
	   more to process */	   
	tevent_loop_wait(ev);

	talloc_free(ev);
	exit(0);
}
Example No. 30
/*
  called when a listening socket becomes readable. 
*/
static void standard_accept_connection(struct tevent_context *ev, 
				       struct loadparm_context *lp_ctx,
				       struct socket_context *sock, 
				       void (*new_conn)(struct tevent_context *,
							struct loadparm_context *, struct socket_context *, 
							struct server_id , void *), 
				       void *private_data)
{
	NTSTATUS status;
	struct socket_context *sock2;
	pid_t pid;
	struct tevent_context *ev2;
	struct socket_address *c, *s;

	/* accept an incoming connection. */
	status = socket_accept(sock, &sock2);
	if (!NT_STATUS_IS_OK(status)) {
		DEBUG(0,("standard_accept_connection: accept: %s\n",
			 nt_errstr(status)));
		/* this looks strange, but is correct. We need to throttle things until
		   the system clears enough resources to handle this new socket */
		sleep(1);
		return;
	}

	pid = fork();

	if (pid != 0) {
		/* parent or error code ... */
		talloc_free(sock2);
		/* go back to the event loop */
		return;
	}

	pid = getpid();

	/* This is now the child code. We need a completely new event_context to work with */
	ev2 = s4_event_context_init(NULL);

	/* the service has given us a private pointer that
	   encapsulates the context it needs for this new connection -
	   everything else will be freed */
	talloc_steal(ev2, private_data);
	talloc_steal(private_data, sock2);

	/* this will free all the listening sockets and all state that
	   is not associated with this new connection */
	talloc_free(sock);
	talloc_free(ev);

	/* we don't care if the dup fails, as it's only a select()
	   speed optimisation */
	socket_dup(sock2);
			
	/* tdb needs special fork handling */
	if (tdb_reopen_all(1) == -1) {
		DEBUG(0,("standard_accept_connection: tdb_reopen_all failed.\n"));
	}

	/* Ensure that the forked children do not expose identical random streams */
	set_need_random_reseed();

	/* setup the process title */
	c = socket_get_peer_addr(sock2, ev2);
	s = socket_get_my_addr(sock2, ev2);
	if (s && c) {
		setproctitle("conn c[%s:%u] s[%s:%u] server_id[%d]",
			     c->addr, c->port, s->addr, s->port, pid);
	}
	talloc_free(c);
	talloc_free(s);

	/* setup this new connection.  Cluster ID is PID based for this process model */
	new_conn(ev2, lp_ctx, sock2, cluster_id(pid, 0), private_data);

	/* we can't return to the top level here, as that event context is gone,
	   so we now process events in the new event context until there are no
	   more to process */	   
	event_loop_wait(ev2);

	talloc_free(ev2);
	exit(0);
}