Code Example #1
File: socket_server.cpp Project: apppur/canna
int socket_server::send_buffer(struct socket * s, struct socket_message * result)
{
    assert(!list_uncomplete(&s->low));
    if (send_list(s, &s->high, result) == SOCKET_CLOSE) {
        return SOCKET_CLOSE;
    }
    if (s->high.head == nullptr) {
        if (s->low.head != nullptr) {
            if (send_list(s, &s->low, result) == SOCKET_CLOSE) {
                return SOCKET_CLOSE;
            }
            if (list_uncomplete(&s->low)) {
                raise_uncomplete(s);
            }
        } else {
            event_fd.write(s->fd, s, false);
            if (s->type == SOCKET_TYPE_HALFCLOSE) {
                force_close(s, result);
                return SOCKET_CLOSE;
            }
        }
    }

    return -1;
}
Code Example #2
File: socket_server.c Project: Lvshen/company_coc
/*
	Each socket has two write buffer lists: high priority and low priority.

	1. Send the high list as far as possible.
	2. If the high list is empty, try to send the low list.
	3. If the head of the low list is incomplete (part of it was sent before), move it to the empty high list (call raise_uncomplete).
	4. If both lists are empty, turn off the write event (call check_close).
 */
static int
send_buffer(struct socket_server *ss, struct socket *s, struct socket_message *result) {
	assert(!list_uncomplete(&s->low));
	// step 1
	if (send_list(ss,s,&s->high,result) == SOCKET_CLOSE) {
		return SOCKET_CLOSE;
	}
	if (s->high.head == NULL) {
		// step 2
		if (s->low.head != NULL) {
			if (send_list(ss,s,&s->low,result) == SOCKET_CLOSE) {
				return SOCKET_CLOSE;
			}
			// step 3
			if (list_uncomplete(&s->low)) {
				raise_uncomplete(s);
			}
		} else {
			// step 4
			sp_write(ss->event_fd, s->fd, s, false);

			if (s->type == SOCKET_TYPE_HALFCLOSE) {
				force_close(ss, s, result);
				return SOCKET_CLOSE;
			}
		}
	}

	return -1;
}
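
Step 3 above depends on raise_uncomplete, which is not shown here. A minimal sketch, assuming the skynet-style wb_list with head/tail pointers (the exact struct layout is an assumption):

static void
raise_uncomplete(struct socket *s) {
	struct wb_list *low = &s->low;
	struct write_buffer *tmp = low->head;

	// Detach the partially sent buffer from the low list.
	low->head = tmp->next;
	if (low->head == NULL) {
		low->tail = NULL;
	}

	// The high list must be empty here (send_buffer only reaches
	// step 3 after draining it), so this buffer becomes its sole entry.
	assert(s->high.head == NULL);
	tmp->next = NULL;
	s->high.head = s->high.tail = tmp;
}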
Code Example #3
File: jcom.send.cpp Project: alexarje/JamomaModular
void send_float(t_send *x, double value)
{
	t_atom a;
	
	atom_setfloat(&a, value);
	send_list(x, _sym_float, 1, &a);
}
Code Example #4
File: jcom.send.cpp Project: alexarje/JamomaModular
void send_int(t_send *x, long value)
{
	t_atom a;
	
	atom_setlong(&a, value);
	send_list(x, _sym_int, 1, &a);
}
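
Both wrappers above follow one pattern: pack the scalar into a single t_atom and forward it to send_list with the matching type symbol. A hypothetical symbol variant in the same style (a sketch, not part of the original file):

void send_symbol(t_send *x, t_symbol *s)
{
	t_atom a;

	// Wrap the symbol in a one-element atom list, as the float and
	// int wrappers do, and hand it to send_list.
	atom_setsym(&a, s);
	send_list(x, _sym_symbol, 1, &a);
}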
Code Example #5
File: sssvlv.c Project: osstech-jp/ReOpenLDAP
static void send_entry(
	Operation		*op,
	SlapReply		*rs,
	sort_op			*so)
{
	Debug(LDAP_DEBUG_TRACE,
		"%s: response control: status=%d, text=%s\n",
		debug_header, rs->sr_err, SAFESTR(rs->sr_text, "<None>"));

	if ( !so->so_tree )
		return;

	/* RFC 2891: If critical then send the entries iff they were
	 * successfully sorted.  If non-critical send all entries
	 * whether they were sorted or not.
	 */
	if ( (op->o_ctrlflag[sss_cid] != SLAP_CONTROL_CRITICAL) ||
		 (rs->sr_err == LDAP_SUCCESS) )
	{
		if ( so->so_vlv > SLAP_CONTROL_IGNORED ) {
			send_list( op, rs, so );
		} else {
			/* Get the first node to send */
			TAvlnode *start_node = tavl_end(so->so_tree, TAVL_DIR_LEFT);
			so->so_tree = start_node;

			if ( so->so_paged <= SLAP_CONTROL_IGNORED ) {
				/* Not paged result search.  Send all entries.
				 * Set the page size to the number of entries
				 * so that send_page() will send all entries.
				 */
				so->so_page_size = so->so_nentries;
			}

			send_page( op, rs, so );
		}
	}
}
Code Example #6
    // Underlying implementation of parameter request
    void ParameterLoaderBase::ProcessRequest()
    {
        zmq::socket_t *socket = ZMQUtil::CreateSocket();
        std::vector<Table*>& cache =
            Multiverso::double_buffer_->IOBuffer();
        for (int i = 0; i < cache.size(); ++i)
        {
            cache[i]->Clear();
        }
        int src_rank = Multiverso::ProcessRank();
        int num_server = Multiverso::TotalServerCount();
        std::vector<MsgPack*> send_list(num_server, nullptr);
        std::vector<int> send_ret_size(num_server, 0);
        int num_send_msg = 0;
        for (auto tuple : requests_)
        {
            integer_t table = tuple.table;
            integer_t row = tuple.row;
            integer_t col = tuple.col;

            // Skip this request if a coarser pending request (whole
            // table, or whole row) already covers it.
            if ((row >= 0 && requests_.find({ table, -1, -1 }) != requests_.end()) ||
                (col >= 0 && requests_.find({ table, row, -1 }) != requests_.end()))
            {
                continue;
            }
            int dst_rank, last_rank;
            if (row == -1)
            {
                dst_rank = 0;
                last_rank = num_server - 1;
            }
            else
            {
                dst_rank = (table + row) % num_server;
                last_rank = dst_rank;
            }
            while (dst_rank <= last_rank)
            {
                if (send_list[dst_rank] == nullptr)
                {
                    send_list[dst_rank] = new MsgPack(MsgType::Get,
                        MsgArrow::Worker2Server, src_rank, dst_rank);
                    send_ret_size[dst_rank] = 0;
                }
                if (send_ret_size[dst_rank] + 3 * sizeof(integer_t) > kMaxMsgSize)
                {
                    send_list[dst_rank]->Send(socket);
                    ++num_send_msg;
                    delete send_list[dst_rank];
                    send_list[dst_rank] = new MsgPack(MsgType::Get,
                        MsgArrow::Worker2Server, src_rank, dst_rank);
                    send_ret_size[dst_rank] = 0;
                }
                zmq::message_t* msg = new zmq::message_t(3 * sizeof(integer_t));
                integer_t* buffer = static_cast<integer_t*>(msg->data());
                buffer[0] = table;
                buffer[1] = row;
                buffer[2] = col;
                send_list[dst_rank]->Push(msg);
                send_ret_size[dst_rank] += 3 * sizeof(integer_t);
                ++dst_rank;
            }
        }
        for (int i = 0; i < num_server; ++i)
        {
            if (send_ret_size[i] > 0)
            {
                send_list[i]->Send(socket);
                ++num_send_msg;
                delete send_list[i];
            }
        }

        // We expect each ReplyGet msg to contain an over tag.
        while (num_send_msg > 0)
        {
            MsgPack reply(socket);
            for (int i = 1; i < reply.Size() - 1; ++i)
            {
                zmq::message_t* msg = reply.GetMsg(i);
                integer_t *buffer = static_cast<integer_t*>(msg->data());
                integer_t table = buffer[0];
                integer_t row = buffer[1];
                cache[table]->GetRow(row)->BatchAdd(buffer + 2);
            }
            zmq::message_t* msg = reply.GetMsg(reply.Size() - 1);
            bool over = (static_cast<integer_t*>(msg->data())[0] == 1);
            if (over)
            {
                --num_send_msg;
            }
        }
        delete socket;
    }
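
Each Get request pushed above is a fixed triple of integer_t values. A hypothetical server-side decoder for one such entry (a sketch; only the triple layout comes from the code above, the function and its surroundings are assumptions):

    void DecodeGetEntry(zmq::message_t* msg)
    {
        integer_t* buffer = static_cast<integer_t*>(msg->data());
        integer_t table = buffer[0];
        integer_t row = buffer[1];   // -1 requests every row of the table
        integer_t col = buffer[2];   // -1 requests the whole row
        // ... look up (table, row, col) and append the result to a
        // ReplyGet message that ends with an over tag ...
    }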
Code Example #7
File: d_node.c Project: gtxx3600/SBPFS
int main() {

	//getlist();

	initlist();
	//writeblock(3, 3, 3, "abc");
	//getlist();
	//char*ip = "192.168.1.107";
	char*ip = "59.78.15.46";
	strcpy(dname,"haha");

	send_list(ip);
	//

	bzero(&list_head, sizeof(struct list_entry));
	pthread_t pid;

	int socket_descriptor;
	if ((socket_descriptor = socket(AF_INET, SOCK_STREAM, 0)) == -1) {
		perror("socket create error");
		exit(1);
	}
	int on = 1;
	setsockopt(socket_descriptor, SOL_SOCKET, SO_REUSEADDR, &on, sizeof(on));
	int port = 9010;
	struct sockaddr_in sin;
	sin.sin_family = AF_INET;
	sin.sin_addr.s_addr = INADDR_ANY;
	sin.sin_port = htons(port);
	bzero(&(sin.sin_zero), 8);
	if (bind(socket_descriptor, (struct sockaddr *) &sin, sizeof(sin)) == -1) {
		perror("bind error");
		exit(1);
	}

	if (listen(socket_descriptor, 10) == -1) {
		perror("listen error");
		exit(1);
	}

	struct sockaddr_in pin;
	struct list_entry* ent;

	int temp_socket_descriptor;

	printf("DISK DRIVER START...\n");
	//if (fork() != 0) {
	//send_copy(ip,1234);
	//} else {
	while (1) {
		socklen_t sin_size = sizeof(struct sockaddr_in);
		temp_socket_descriptor = accept(socket_descriptor,
				(struct sockaddr *) &pin, &sin_size);

		if (temp_socket_descriptor == -1) {
			perror("Accept Failed");
			continue;
		}
		//printf("Accept Success\n");
		if ((ent = (struct list_entry*) malloc(sizeof(struct list_entry)))
				== NULL) {
			perror("Malloc Failed");
			close(temp_socket_descriptor);
			continue;
		}
		bzero(ent, sizeof(struct list_entry));
		ent->skt = temp_socket_descriptor;
		if (pthread_create(&pid, NULL, serve, ent)) {
			perror("Create Thread Failed");
			close(temp_socket_descriptor);
			free(ent);
			continue;
		}
		pthread_detach(pid);
		//close(temp_socket_descriptor);
	}
	//}
	return 0;
}
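
Each accepted connection is handed to a detached thread running serve, whose definition is not shown. A plausible shape, inferred from how ent is allocated and passed above (a sketch only):

void *serve(void *arg) {
	struct list_entry *ent = (struct list_entry *) arg;

	/* ... read requests from ent->skt and answer them ... */

	close(ent->skt);
	free(ent);
	return NULL;
}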
Code Example #8
File: main.c Project: TNick/aitown
//! process a request message
static index_error process_request ( index_data_t *index_data_, 
    void *server, void *data, size_t data_sz)
{ dbg_message (__func__);

	index_error errcode = FUNC_OK;
	
	// decode the message
	AiTownIndex * incoming = AiTownIndex__unpack (data_sz, data);
	if ( incoming == NULL ) {
		send_error (server, "Error decoding incoming message");
		return FUNC_OK;
	}
	
	// check the version - the only one that we know of is 1
	if ( incoming->version != 1 ) {
		send_error (server, "Unsupported version");
		AiTownIndex__free_unpacked (incoming);
		return FUNC_OK;
	}
	
	// type based processing
	switch ( incoming->type ) {
	case   AI_TOWN_INDEX_MESSAGE_TYPE__AITMT_INDEX_ADD: {
	
		// check the sanity of incoming data
		if ( ( incoming->add == NULL ) || 
		      ( incoming->add->name == NULL ) ||
		      ( incoming->add->address == NULL ) ||
		      ( !incoming->add->has_port ) ||
		      ( incoming->add->port <= 0 ) ) {
			send_error (server, "Malformed add request");
			break;
		}
		
		// allocate a new server structure for this
		server_data_t * sd;
		server_data_t * sd_other;
		errcode = server_data_new (&sd, incoming->add->name, 
		    incoming->add->address, incoming->add->port);
		if ( errcode != FUNC_OK ) {
			send_error (server, "Internal error");
			break;
		}
		
		// see if a server with this name already exists and remove it if so
		sd_other = index_data_get_server (index_data_, sd->name);
		if ( sd_other != NULL ) {
			index_data_rem_server (index_data_, sd_other );
			server_data_delete (&sd_other);
		}
		index_data_add_server (index_data_, sd);
		log_message ("Added server %s", sd->name);
		send_ok (server);
		break; }
		
	case   AI_TOWN_INDEX_MESSAGE_TYPE__AITMT_INDEX_REM: {
	
		// check the sanity of incoming data
		if ( ( incoming->rem == NULL ) ||
		      ( incoming->rem->name == NULL ) ) {
			send_error (server, "Malformed remove request");
			break;
		}
		
		// find the server in question
		const char * name = incoming->rem->name;
		server_data_t * sd;
		sd = index_data_get_server (index_data_, name);
		
		// and remove it
		if ( sd == NULL ) {
			send_error (server, "Attempt to close unexisting server");
			err_message (name);
		} else {
			index_data_rem_server (index_data_, sd );
			server_data_delete (&sd);
			log_message ("Removed server %s", name);
			send_ok (server);
		}
		break; }
	case   AI_TOWN_INDEX_MESSAGE_TYPE__AITMT_INDEX_LIST: {
		send_list (index_data_, server);
		break; }
	default:
		send_error (server, "Unknown incoming message type");
		err_message( "Requested type was %d", incoming->type );
	}

	// release serialized data
	AiTownIndex__free_unpacked (incoming);
	return errcode;
}
Code Example #9
File: m_list.c Project: Northfire/rabbitircd
/*
 * m_list
 *	parv[0] = sender prefix
 *	parv[1] = channel
 */
DLLFUNC CMD_FUNC(m_list)
{
	aChannel *chptr;
	TS   currenttime = TStime();
	char *name, *p = NULL;
	LOpts *lopt = NULL;
	Link *lp;
	int  usermax, usermin, error = 0, doall = 0;
	TS   chantimemin, chantimemax;
	TS   topictimemin, topictimemax;
	Link *yeslist = NULL, *nolist = NULL;

	static char *usage[] = {
		"   Usage: /LIST <options>",
		"",
		"If you don't include any options, the default is to send you the",
		"entire unfiltered list of channels. Below are the options you can",
		"use, and what channels LIST will return when you use them.",
		">number  List channels with more than <number> people.",
		"<number  List channels with less than <number> people.",
		"C>number List channels created between now and <number> minutes ago.",
		"C<number List channels created earlier than <number> minutes ago.",
		"T>number List channels whose topics are older than <number> minutes",
		"         (Ie, they have not changed in the last <number> minutes.",
		"T<number List channels whose topics are not older than <number> minutes.",
		"*mask*   List channels that match *mask*",
		"!*mask*  List channels that do not match *mask*",
		NULL
	};

	/* Some starting sanity checks -- No interserver lists allowed. */
	if (cptr != sptr || !sptr->user)
		return 0;

	/* If a /list is in progress, then another one will cancel it */
	if ((lopt = sptr->user->lopt) != NULL)
	{
		sendto_one(sptr, rpl_str(RPL_LISTEND), me.name, parv[0]);
		free_str_list(sptr->user->lopt->yeslist);
		free_str_list(sptr->user->lopt->nolist);
		MyFree(sptr->user->lopt);
		sptr->user->lopt = NULL;
		return 0;
	}

	if (parc < 2 || BadPtr(parv[1]))
	{

		sendto_one(sptr, rpl_str(RPL_LISTSTART), me.name, parv[0]);
		lopt = sptr->user->lopt = (LOpts *) MyMalloc(sizeof(LOpts));
		memset(lopt, '\0', sizeof(LOpts));

		lopt->showall = 1;

		if (DBufLength(&cptr->sendQ) < 2048)
			send_list(cptr, 64);

		return 0;
	}

	if ((parc == 2) && (parv[1][0] == '?') && (parv[1][1] == '\0'))
	{
		char **ptr = usage;
		for (; *ptr; ptr++)
			sendto_one(sptr, rpl_str(RPL_LISTSYNTAX),
			    me.name, cptr->name, *ptr);
		return 0;
	}

	sendto_one(sptr, rpl_str(RPL_LISTSTART), me.name, parv[0]);

	chantimemax = topictimemax = currenttime + 86400;
	chantimemin = topictimemin = 0;
	usermin = 1;		/* Minimum of 1 */
	usermax = -1;		/* No maximum */

	for (name = strtok_r(parv[1], ",", &p); name && !error;
	    name = strtok_r(NULL, ",", &p))
	{

		switch (*name)
		{
		  case '<':
			  usermax = atoi(name + 1) - 1;
			  doall = 1;
			  break;
		  case '>':
			  usermin = atoi(name + 1) + 1;
			  doall = 1;
			  break;
		  case 'C':
		  case 'c':	/* Channel TS time -- creation time? */
			  ++name;
			  switch (*name++)
			  {
			    case '<':
				    chantimemax = currenttime - 60 * atoi(name);
				    doall = 1;
				    break;
			    case '>':
				    chantimemin = currenttime - 60 * atoi(name);
				    doall = 1;
				    break;
			    default:
				    sendto_one(sptr, err_str(ERR_LISTSYNTAX), me.name, cptr->name);
				    error = 1;
			  }
			  break;
#ifdef LIST_USE_T
		  case 'T':
		  case 't':
			  ++name;
			  switch (*name++)
			  {
			    case '<':
				    topictimemax =
					currenttime - 60 * atoi(name);
				    doall = 1;
				    break;
			    case '>':
				    topictimemin =
					currenttime - 60 * atoi(name);
				    doall = 1;
				    break;
			    default:
				    sendto_one(sptr,
					err_str(ERR_LISTSYNTAX),
					me.name, cptr->name,
					"Bad list syntax, type /list ?");
				    error = 1;
			  }
			  break;
#endif
		  default:	/* A channel, possibly with wildcards.
				 * Thought for the future: Consider turning on
				 * wildcard processing on the fly.
				 * new syntax: !channelmask will tell ircd to ignore
				 * any channels matching that mask, and then
				 * channelmask will tell ircd to send us a list of
				 * channels only masking channelmask. Note: Specifying
				 * a channel without wildcards will return that
				 * channel even if any of the !channelmask masks
				 * matches it.
				 */
			  if (*name == '!')
			  {
				  doall = 1;
				  lp = make_link();
				  lp->next = nolist;
				  nolist = lp;
				  DupString(lp->value.cp, name + 1);
			  }
			  else if (strchr(name, '*') || strchr(name, '?'))
			  {
				  doall = 1;
				  lp = make_link();
				  lp->next = yeslist;
				  yeslist = lp;
				  DupString(lp->value.cp, name);
			  }
			  else	/* Just a normal channel */
			  {
				  chptr = find_channel(name, NullChn);
				  if (chptr && (ShowChannel(sptr, chptr) || OPCanSeeSecret(sptr))) {
#ifdef LIST_SHOW_MODES
					modebuf[0] = '[';
					channel_modes(sptr, modebuf+1, parabuf, sizeof(modebuf)-1, sizeof(parabuf), chptr);
					if (modebuf[2] == '\0')
						modebuf[0] = '\0';
					else
						strlcat(modebuf, "]", sizeof modebuf);
#endif
					  sendto_one(sptr,
					      rpl_str(RPL_LIST),
					      me.name, parv[0],
					      name, chptr->users,
#ifdef LIST_SHOW_MODES
					      modebuf,
#endif
					      (chptr->topic ? chptr->topic :
					      ""));
				  }
			  }
		}		/* switch */
	}			/* while */

	if (doall)
	{
		lopt = sptr->user->lopt = (LOpts *) MyMalloc(sizeof(LOpts));
		memset(lopt, '\0', sizeof(LOpts));
		lopt->usermin = usermin;
		lopt->usermax = usermax;
		lopt->topictimemax = topictimemax;
		lopt->topictimemin = topictimemin;
		lopt->chantimemax = chantimemax;
		lopt->chantimemin = chantimemin;
		lopt->nolist = nolist;
		lopt->yeslist = yeslist;

		if (DBufLength(&cptr->sendQ) < 2048)
			send_list(cptr, 64);
		return 0;
	}

	sendto_one(sptr, rpl_str(RPL_LISTEND), me.name, parv[0]);

	return 0;
}
Code Example #10
File: make_local_matrix.hpp Project: 8l/insieme
void
make_local_matrix(MatrixType& A)
{
#ifdef HAVE_MPI
  int numprocs = 1, myproc = 0;
  MPI_Comm_size(MPI_COMM_WORLD, &numprocs);
  MPI_Comm_rank(MPI_COMM_WORLD, &myproc);

  if (numprocs < 2) {
    A.num_cols = A.rows.size();
    A.has_local_indices = true;
    return;
  }

  typedef typename MatrixType::GlobalOrdinalType GlobalOrdinal;
  typedef typename MatrixType::LocalOrdinalType LocalOrdinal;
  typedef typename MatrixType::ScalarType Scalar;

  std::map<GlobalOrdinal,GlobalOrdinal> externals;
  LocalOrdinal num_external = 0;

  //Extract Matrix pieces

  size_t local_nrow = A.rows.size();
  GlobalOrdinal start_row = local_nrow>0 ? A.rows[0] : -1;
  GlobalOrdinal stop_row  = local_nrow>0 ? A.rows[local_nrow-1] : -1;

  // We need to convert the index values for the rows on this processor
  // to a local index space. We need to:
  // - Determine if each index reaches to a local value or external value
  // - If local, subtract start_row from index value to get local index
  // - If external, find out if it is already accounted for.
  //   - If so, then do nothing,
  //   - otherwise
  //     - add it to the list of external indices,
  //     - find out which processor owns the value.
  //     - Set up communication for sparse MV operation

  ///////////////////////////////////////////
  // Scan the indices and transform to local
  ///////////////////////////////////////////

  std::vector<GlobalOrdinal>& external_index = A.external_index;

  for(size_t i=0; i<A.rows.size(); ++i) {
    GlobalOrdinal* Acols = NULL;
    Scalar* Acoefs = NULL;
    size_t row_len = 0;
    A.get_row_pointers(A.rows[i], row_len, Acols, Acoefs);

    for(size_t j=0; j<row_len; ++j) {
      GlobalOrdinal cur_ind = Acols[j];
      if (start_row <= cur_ind && cur_ind <= stop_row) {
        Acols[j] -= start_row;
      }
      else { // Must find out if we have already set up this point
        if (externals.find(cur_ind) == externals.end()) {
          externals[cur_ind] = num_external++;
          external_index.push_back(cur_ind);
        }
        // Mark index as external by adding 1 and negating it
        Acols[j] = -(Acols[j] + 1);
      }
    }
  }

  ////////////////////////////////////////////////////////////////////////
  // Go through list of externals to find out which processors must be accessed.
  ////////////////////////////////////////////////////////////////////////

  std::vector<GlobalOrdinal> tmp_buffer(numprocs, 0); // Temp buffer space needed below

  // Build list of global index offset

  std::vector<GlobalOrdinal> global_index_offsets(numprocs, 0);

  tmp_buffer[myproc] = start_row; // This is my start row

  // This call sends the start_row of each ith processor to the ith
  // entry of global_index_offsets on all processors.
  // Thus, each processor knows the range of indices owned by all
  // other processors.
  // Note: There might be a better algorithm for doing this, but this
  //       will work...

  MPI_Datatype mpi_dtype = TypeTraits<GlobalOrdinal>::mpi_type();
  MPI_Allreduce(&tmp_buffer[0], &global_index_offsets[0], numprocs, mpi_dtype,
                MPI_SUM, MPI_COMM_WORLD);

  // Go through list of externals and find the processor that owns each
  std::vector<int> external_processor(num_external);

  for(LocalOrdinal i=0; i<num_external; ++i) {
    GlobalOrdinal cur_ind = external_index[i];
    for(int j=numprocs-1; j>=0; --j) {
      if (global_index_offsets[j] <= cur_ind && global_index_offsets[j] >= 0) {
        external_processor[i] = j;
        break;
      }
    }
  }

  /////////////////////////////////////////////////////////////////////////
  // Sift through the external elements. For each newly encountered external
  // point assign it the next index in the sequence. Then look for other
  // external elements who are updated by the same node and assign them the next
  // set of index numbers in the sequence (ie. elements updated by the same node
  // have consecutive indices).
  /////////////////////////////////////////////////////////////////////////

  size_t count = local_nrow;
  std::vector<GlobalOrdinal>& external_local_index = A.external_local_index;
  external_local_index.assign(num_external, -1);

  for(LocalOrdinal i=0; i<num_external; ++i) {
    if (external_local_index[i] == -1) {
      external_local_index[i] = count++;

      for(LocalOrdinal j=i+1; j<num_external; ++j) {
        if (external_processor[j] == external_processor[i])
          external_local_index[j] = count++;
      }
    }
  }

  for(size_t i=0; i<local_nrow; ++i) {
    GlobalOrdinal* Acols = NULL;
    Scalar* Acoefs = NULL;
    size_t row_len = 0;
    A.get_row_pointers(A.rows[i], row_len, Acols, Acoefs);

    for(size_t j=0; j<row_len; ++j) {
      if (Acols[j] < 0) { // Change index values of externals
        GlobalOrdinal cur_ind = -Acols[j] - 1;
        Acols[j] = external_local_index[externals[cur_ind]];
      }
    }
  }

  std::vector<int> new_external_processor(num_external, 0);

  for(int i=0; i<num_external; ++i) {
    new_external_processor[external_local_index[i]-local_nrow] =
      external_processor[i];
  }

  ////////////////////////////////////////////////////////////////////////
  ///
  // Count the number of neighbors from which we receive information to update
  // our external elements. Additionally, fill the array tmp_neighbors in the
  // following way:
  //      tmp_neighbors[i] = 0   ==>  No external elements are updated by
  //                              processor i.
  //      tmp_neighbors[i] = x   ==>  (x-1)/numprocs elements are updated from
  //                              processor i.
  ///
  ////////////////////////////////////////////////////////////////////////

  std::vector<GlobalOrdinal> tmp_neighbors(numprocs, 0);

  int num_recv_neighbors = 0;
  int length             = 1;

  for(LocalOrdinal i=0; i<num_external; ++i) {
    if (tmp_neighbors[new_external_processor[i]] == 0) {
      ++num_recv_neighbors;
      tmp_neighbors[new_external_processor[i]] = 1;
    }
    tmp_neighbors[new_external_processor[i]] += numprocs;
  }

  /// sum over all processor all the tmp_neighbors arrays ///

  MPI_Allreduce(&tmp_neighbors[0], &tmp_buffer[0], numprocs, mpi_dtype,
                MPI_SUM, MPI_COMM_WORLD);

  // decode the combined 'tmp_neighbors' (stored in tmp_buffer)
  // array from all the processors

  GlobalOrdinal num_send_neighbors = tmp_buffer[myproc] % numprocs;

  /// decode 'tmp_buffer[myproc]' to deduce the total number of elements
  //  we must send

  GlobalOrdinal total_to_be_sent = (tmp_buffer[myproc] - num_send_neighbors) / numprocs;
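
  // Worked example (not from the original source): with numprocs = 4,
  // suppose two other processors need 2 and 4 of our elements. Each
  // contributes 1 + 4*k to tmp_neighbors[myproc], so after the allreduce
  // tmp_buffer[myproc] = (1 + 4*2) + (1 + 4*4) = 26. Decoding gives
  // num_send_neighbors = 26 % 4 = 2 and total_to_be_sent = (26 - 2) / 4 = 6.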

  ///////////////////////////////////////////////////////////////////////
  ///
  // Make a list of the neighbors that will send information to update our
  // external elements (in the order that we will receive this information).
  ///
  ///////////////////////////////////////////////////////////////////////

  std::vector<int> recv_list;
  recv_list.push_back(new_external_processor[0]);
  for(LocalOrdinal i=1; i<num_external; ++i) {
    if (new_external_processor[i-1] != new_external_processor[i]) {
      recv_list.push_back(new_external_processor[i]);
    }
  }

  //
  // Send a 0 length message to each of our recv neighbors
  //

  std::vector<int> send_list(num_send_neighbors, 0);

  //
  // first post receives, these are immediate receives
  // Do not wait for result to come, will do that at the
  // wait call below.
  //
  int MPI_MY_TAG = 99;

  std::vector<MPI_Request> request(num_send_neighbors);
  for(int i=0; i<num_send_neighbors; ++i) {
    MPI_Irecv(&tmp_buffer[i], 1, mpi_dtype, MPI_ANY_SOURCE, MPI_MY_TAG,
              MPI_COMM_WORLD, &request[i]);
  }

  // send messages

  for(int i=0; i<num_recv_neighbors; ++i) {
    MPI_Send(&tmp_buffer[i], 1, mpi_dtype, recv_list[i], MPI_MY_TAG,
             MPI_COMM_WORLD);
  }

  ///
  // Receive message from each send neighbor to construct 'send_list'.
  ///

  MPI_Status status;
  for(int i=0; i<num_send_neighbors; ++i) {
    if (MPI_Wait(&request[i], &status) != MPI_SUCCESS) {
      std::cerr << "MPI_Wait error" << std::endl;
      MPI_Abort(MPI_COMM_WORLD, -1);
    }
    send_list[i] = status.MPI_SOURCE;
  }

  //////////////////////////////////////////////////////////////////////
  ///
  // Compare the two lists. In most cases they should be the same.
  // However, if they are not then add new entries to the recv list
  // that are in the send list (but not already in the recv list).
  ///
  //////////////////////////////////////////////////////////////////////

  for(int j=0; j<num_send_neighbors; ++j) {
    int found = 0;
    for(int i=0; i<num_recv_neighbors; ++i) {
      if (recv_list[i] == send_list[j]) found = 1;
    }

    if (found == 0) {
      recv_list.push_back(send_list[j]);
      ++num_recv_neighbors;
    }
  }

  num_send_neighbors = num_recv_neighbors;
  request.resize(num_send_neighbors);

  A.elements_to_send.assign(total_to_be_sent, 0);
  A.send_buffer.assign(total_to_be_sent, 0);

  //
  // Create 'new_external' which explicitly puts the external elements in the
  // order given by 'external_local_index'
  //

  std::vector<GlobalOrdinal> new_external(num_external);
  for(LocalOrdinal i=0; i<num_external; ++i) {
    new_external[external_local_index[i] - local_nrow] = external_index[i];
  }

  /////////////////////////////////////////////////////////////////////////
  //
  // Send each processor the global index list of the external elements in the
  // order that I will want to receive them when updating my external elements.
  //
  /////////////////////////////////////////////////////////////////////////

  std::vector<int> lengths(num_recv_neighbors);

  ++MPI_MY_TAG;

  // First post receives

  for(int i=0; i<num_recv_neighbors; ++i) {
    int partner = recv_list[i];
    MPI_Irecv(&lengths[i], 1, MPI_INT, partner, MPI_MY_TAG, MPI_COMM_WORLD,
              &request[i]);
  }

  std::vector<int>& neighbors = A.neighbors;
  std::vector<int>& recv_length = A.recv_length;
  std::vector<int>& send_length = A.send_length;

  neighbors.resize(num_recv_neighbors, 0);
  A.request.resize(num_recv_neighbors);
  recv_length.resize(num_recv_neighbors, 0);
  send_length.resize(num_recv_neighbors, 0);

  LocalOrdinal j = 0;
  for(int i=0; i<num_recv_neighbors; ++i) {
    int start = j;
    int newlength = 0;

    //go through list of external elements until updating
    //processor changes

    while((j < num_external) &&
          (new_external_processor[j] == recv_list[i])) {
      ++newlength;
      ++j;
      if (j == num_external) break;
    }

    recv_length[i] = newlength;
    neighbors[i] = recv_list[i];

    length = j - start;
    MPI_Send(&length, 1, MPI_INT, recv_list[i], MPI_MY_TAG, MPI_COMM_WORLD);
  }

  // Complete the receives of the number of externals

  for(int i=0; i<num_recv_neighbors; ++i) {
    if (MPI_Wait(&request[i], &status) != MPI_SUCCESS) {
      std::cerr << "MPI_Wait error" << std::endl;
      MPI_Abort(MPI_COMM_WORLD, -1);
    }
    send_length[i] = lengths[i];
  }

  ////////////////////////////////////////////////////////////////////////
  // Build "elements_to_send" list. These are the x elements I own
  // that need to be sent to other processors.
  ////////////////////////////////////////////////////////////////////////

  ++MPI_MY_TAG;

  j = 0;
  for(int i=0; i<num_recv_neighbors; ++i) {
    MPI_Irecv(&A.elements_to_send[j], send_length[i], mpi_dtype, neighbors[i],
              MPI_MY_TAG, MPI_COMM_WORLD, &request[i]);
    j += send_length[i];
  }

  j = 0;
  for(int i=0; i<num_recv_neighbors; ++i) {
    LocalOrdinal start = j;
    LocalOrdinal newlength = 0;

    // Go through list of external elements
    // until updating processor changes. This is redundant, but
    // saves us from recording this information.

    while((j < num_external) &&
          (new_external_processor[j] == recv_list[i])) {
      ++newlength;
      ++j;
      if (j == num_external) break;
    }
    MPI_Send(&new_external[start], j-start, mpi_dtype, recv_list[i],
             MPI_MY_TAG, MPI_COMM_WORLD);
  }

  // receive from each neighbor the global index list of external elements

  for(int i=0; i<num_recv_neighbors; ++i) {
    if (MPI_Wait(&request[i], &status) != MPI_SUCCESS) {
      std::cerr << "MPI_Wait error" << std::endl;
      MPI_Abort(MPI_COMM_WORLD, -1);
    }
  }

  /// replace global indices by local indices ///

  for(GlobalOrdinal i=0; i<total_to_be_sent; ++i) {
    A.elements_to_send[i] -= start_row;
    if (A.elements_to_send[i] >= A.rows.size()) {
//std::cout<<"start_row: "<<start_row<<", A.elements_to_send[i]: "<<A.elements_to_send[i]<<", A.rows.size(): "<<A.rows.size()<<std::endl;
    assert(A.elements_to_send[i] < A.rows.size());
    }
  }

  //////////////////
  // Finish up !!
  //////////////////

  A.num_cols = local_nrow + num_external;

#else
  A.num_cols = A.rows.size();
#endif

  A.has_local_indices = true;
}
Code Example #11
File: sssvlv.c Project: osstech-jp/ReOpenLDAP
static int sssvlv_op_search(
	Operation		*op,
	SlapReply		*rs)
{
	slap_overinst			*on			= (slap_overinst *)op->o_bd->bd_info;
	sssvlv_info				*si			= on->on_bi.bi_private;
	int						rc			= SLAP_CB_CONTINUE;
	int	ok, need_unlock = 0;
	sort_ctrl *sc;
	PagedResultsState *ps;
	vlv_ctrl *vc;
	int sess_id;

	if ( op->o_ctrlflag[sss_cid] <= SLAP_CONTROL_IGNORED ) {
		if ( op->o_ctrlflag[vlv_cid] > SLAP_CONTROL_IGNORED ) {
			sort_op so; memset(&so, 0, sizeof(so));
			so.so_vlv_rc = LDAP_VLV_SSS_MISSING;
			so.so_vlv = op->o_ctrlflag[vlv_cid];
			LDAPControl *ctrls[2];
			rc = pack_vlv_response_control( op, rs, &so, ctrls );
			if ( rc == LDAP_SUCCESS ) {
				ctrls[1] = NULL;
				slap_add_ctrls( op, rs, ctrls );
			}
			rs->sr_err = LDAP_VLV_ERROR;
			rs->sr_text = "Sort control is required with VLV";
			goto leave;
		}
		/* Not server side sort so just continue */
		return SLAP_CB_CONTINUE;
	}

	Debug(LDAP_DEBUG_TRACE,
		"==> sssvlv_search: <%s> %s, control flag: %d\n",
		op->o_req_dn.bv_val, op->ors_filterstr.bv_val,
		op->o_ctrlflag[sss_cid]);

	sc = op->o_controls[sss_cid];
	if ( sc->sc_nkeys > si->svi_max_keys ) {
		rs->sr_text = "Too many sort keys";
		rs->sr_err = LDAP_UNWILLING_TO_PERFORM;
		goto leave;
	}

	ps = ( op->o_pagedresults > SLAP_CONTROL_IGNORED ) ?
		(PagedResultsState*)(op->o_pagedresults_state) : NULL;
	vc = op->o_ctrlflag[vlv_cid] > SLAP_CONTROL_IGNORED ?
		op->o_controls[vlv_cid] : NULL;

	if ( ps && vc ) {
		rs->sr_text = "VLV incompatible with PagedResults";
		rs->sr_err = LDAP_UNWILLING_TO_PERFORM;
		goto leave;
	}

	ldap_pvt_thread_mutex_lock( &sort_conns_mutex );
	ok = need_unlock = 1;
	sort_op *so = NULL;

	/* Is there already a sort running on this conn? */
	sess_id = find_session_by_context( si->svi_max_percon, op->o_conn->c_conn_idx, vc ? vc->vc_context : NO_VC_CONTEXT, ps ? ps->ps_cookie : NO_PS_COOKIE );
	if ( sess_id >= 0 ) {
		so = sort_conns[op->o_conn->c_conn_idx][sess_id];
		if (so->so_running) {
			/* another thread is handling it; respond busy to the client */
			ok = 0;
			so = NULL;
		} else {
			/* Is it a continuation of a VLV search? */
			if ( !vc || so->so_vlv <= SLAP_CONTROL_IGNORED ||
					vc->vc_context != so->so_vcontext ) {
				/* Is it a continuation of a paged search? */
				if ( !ps || so->so_paged <= SLAP_CONTROL_IGNORED ||
					op->o_conn->c_pagedresults_state.ps_cookie != ps->ps_cookie ) {
					ok = 0;
				} else if ( !ps->ps_size ) {
					/* Abandoning current request */
					ok = 0;
					so->so_nentries = 0;
					rs->sr_err = LDAP_SUCCESS;
				}
			}
			if (( vc && so->so_paged > SLAP_CONTROL_IGNORED ) ||
					( ps && so->so_vlv > SLAP_CONTROL_IGNORED )) {
				/* changed from paged to vlv or vice versa, abandon */
				ok = 0;
				so->so_nentries = 0;
				rs->sr_err = LDAP_UNWILLING_TO_PERFORM;
			}
			if ( ok ) {
				/* occupy before mutex unlock */
				so->so_running = 1;
			}
		}
	/* Are there too many running overall? */
	} else if ( si->svi_num >= si->svi_max ) {
		ok = 0;
	} else if ( ( sess_id = find_next_session(si->svi_max_percon, op->o_conn->c_conn_idx ) ) < 0 ) {
		ok = 0;
	} else {
		/* OK, this connection gets a new sort op running as sess_id */
	}

	if (! ok || so != NULL) {
		assert(need_unlock != 0);
		ldap_pvt_thread_mutex_unlock( &sort_conns_mutex );
		need_unlock = 0;
	}

	if ( ok ) {
		/* If we're a global overlay, this check got bypassed */
		if ( !op->ors_limit && limits_check( op, rs )) {
			if (need_unlock) {
				ldap_pvt_thread_mutex_unlock( &sort_conns_mutex );
				need_unlock = 0;
			}
			if (so)
				free_sort_op( op->o_conn, so );
			return rs->sr_err;
		}
		/* are we continuing a VLV search? */
		if ( so && vc && vc->vc_context ) {
			assert(need_unlock == 0);
			so->so_ctrl = sc;
			send_list( op, rs, so );
			send_result( op, rs, so );
			rc = LDAP_SUCCESS;
		/* are we continuing a paged search? */
		} else if ( so && ps && ps->ps_cookie ) {
			assert(need_unlock == 0);
			so->so_ctrl = sc;
			send_page( op, rs, so );
			send_result( op, rs, so );
			rc = LDAP_SUCCESS;
		} else {
			/* Install serversort response callback to handle a new search */
			assert(need_unlock != 0);
			assert(so == NULL);

			so = ch_calloc( 1, sizeof(sort_op));
			slap_callback *cb = op->o_tmpcalloc( 1, sizeof(slap_callback),
				op->o_tmpmemctx );
			LDAP_ENSURE(so != NULL && cb != NULL); /* FIXME: LDAP_OTHER */

			cb->sc_response		= sssvlv_op_response;
			cb->sc_next			= op->o_callback;
			cb->sc_private		= so;

			assert(so->so_tree == NULL);
			so->so_ctrl = sc;
			so->so_info = si;
			if ( ps ) {
				so->so_paged = op->o_pagedresults;
				so->so_page_size = ps->ps_size;
				op->o_pagedresults = SLAP_CONTROL_IGNORED;
				assert(so->so_page_size != 0);
			} else {
				if ( vc ) {
					so->so_vlv = op->o_ctrlflag[vlv_cid];
					assert(so->so_vlv_target == 0);
					assert(so->so_vlv_rc == 0);
					assert(so->so_vlv != SLAP_CONTROL_NONE);
				} else {
					assert(so->so_vlv == SLAP_CONTROL_NONE);
				}
			}
			so->so_session = sess_id;
			so->so_vlv = op->o_ctrlflag[vlv_cid];
			so->so_vcontext = (size_t)so;
			assert(so->so_nentries == 0);
			op->o_callback = cb;

			assert(sess_id >= 0);
			so->so_running = 1;
			sort_conns[op->o_conn->c_conn_idx][sess_id] = so;
			si->svi_num++;
			ldap_pvt_thread_mutex_unlock( &sort_conns_mutex );
			need_unlock = 0;
		}
		assert(need_unlock == 0);
	} else {
		assert(need_unlock == 0);
		if ( so && !so->so_nentries ) {
			free_sort_op( op->o_conn, so );
		} else {
			rs->sr_text = "Other sort requests already in progress";
			rs->sr_err = LDAP_BUSY;
		}
leave:
		assert(need_unlock == 0);
		rc = rs->sr_err;
		send_ldap_result( op, rs );
	}

	assert(need_unlock == 0);
	return rc;
}
Code Example #12
File: jcom.send.cpp Project: alexarje/JamomaModular
void send_bang(t_send *x)
{
	send_list(x, _sym_bang, 0, NULL);
}
Code Example #13
File: wxftp.c Project: wangkendy/wxFtp
int main()
{
	char HOME[128];
	socklen_t client_len;
	struct sockaddr_in client_address;
	int result;
	fd_set readfds, testfds;

	struct sigaction act;
	act.sa_handler = sigint_handler;
	sigemptyset(&act.sa_mask);
	act.sa_flags = 0;
	sigaction(SIGINT, &act, 0);

	get_home_dir(HOME);
	printf("HOME:%s\n", HOME);

	//global variable server_sockfd will be set.
	create_server_socket();

	FD_ZERO(&readfds);
	FD_SET(server_sockfd, &readfds);

	while(1) {
		char buf[128];
		int fd;
		int nread;
	
		testfds = readfds;

		printf("server waiting\n");
		result = select(FD_SETSIZE, &testfds, (fd_set *)0, (fd_set *)0, (struct timeval *)0);

		if(result < 1) {
			perror("ftp_server");
			exit(1);
		}

		for(fd = 0; fd < FD_SETSIZE; fd++) {
			if(FD_ISSET(fd, &testfds)) {
				if(fd == server_sockfd) {// a new client connection has arrived
					client_len = sizeof(client_address);
					client_sockfd = accept(server_sockfd, (struct sockaddr *)&client_address, &client_len);
					FD_SET(client_sockfd, &readfds);
					client[client_sockfd].cmd_sockfd = client_sockfd;
					client[client_sockfd].sin_addr.s_addr = client_address.sin_addr.s_addr;
					sprintf(client[client_sockfd].cwd, "/");
					printf("adding client on fd %d\n", client_sockfd);
					sprintf(buf, "220 (wxftp 1.0)\r\n");
					write(client_sockfd, buf, strlen(buf));
				} else {// data arrived from an existing client connection
					ioctl(fd, FIONREAD, &nread);
					if(nread == 0) {// nothing to read: the client has closed the connection
						close(fd);
						memset(&client[fd], 0, sizeof(struct Client));
						FD_CLR(fd, &readfds);
						printf("removing client on fd %d\n", fd);
					} else {// read the data and treat it as a command
						if(nread > (int)sizeof(buf) - 1)
							nread = sizeof(buf) - 1; /* clamp so buf[nread] below stays in bounds */
						read(fd, buf, nread);
						buf[nread] = '\0';
						printf("serving client on fd %d: %s\n", fd, buf);
						if(strncmp(buf, "USER", 4) == 0) {
							sscanf(&buf[4], "%s", client[fd].user);
							//printf("user %s\n", client[fd].user);
							sprintf(buf, "331 Password required for %s.\r\n", client[fd].user);
							write(fd, buf, strlen(buf));
						} else if(strncmp(buf, "PASS", 4) == 0) {
							sscanf(&buf[4], "%s", client[fd].pass);
							if (authenticate(client[fd].user, client[fd].pass) == 0) {
								client[fd].authorized = 1;
								sprintf(buf, "230 Login successful.\r\n");
							} else {
								client[fd].authorized = 0;
								sprintf(buf, "530 Login or Password incorrect.\r\n");
							}
							write(fd, buf, strlen(buf));
						} else if(strncmp(buf, "SYST", 4) == 0) {
							sprintf(buf, "215 Linux.\r\n");
							write(fd, buf, strlen(buf));
						} else if(strncmp(buf, "FEAT", 4) == 0) {
							sprintf(buf, "550 Not support.\r\n");
							write(fd, buf, strlen(buf));
						} else if(strncmp(buf, "PWD", 3) == 0) {
							sprintf(buf, "257 \"%s\" is current directory.\r\n", client[fd].cwd);
							write(fd, buf, strlen(buf));
						} else if(strncmp(buf, "CWD", 3) == 0) {
							char dir[128];
							sscanf(&buf[3], "%s", dir);
							if (strncmp(dir, "..", 2) == 0) {
								if(strlen(client[fd].cwd) == 1) {
									sprintf(buf, "250 \"/\" is current directory.\r\n");
								} else {
									int i;
									char *cwd = client[fd].cwd;
									int len = strlen(cwd);
									for(i = len - 1; i >= 0; i--) {
										if(cwd[i] == '/' && i != len - 1) {
											cwd[i+1] = '\0';
											break;
										}
									}
									sprintf(buf, "250 CWD command successful. \"%s\" is current directory.\r\n", client[fd].cwd);
								}
							} else if(file_exist(client[fd].cwd, dir) == 0) {
								//client[fd].cwd = ;
								char *cwd = client[fd].cwd;
								int len = strlen(cwd);
								if(cwd[len-1] == '/') {
									sprintf(&client[fd].cwd[len], "%s", dir);
								} else {
									sprintf(&client[fd].cwd[len], "/%s", dir);
								}
								sprintf(buf,"250 CWD command successful. \"%s\" is current directory.\r\n",client[fd].cwd );
							} else {
								sprintf(buf,"550 CWD failed. \"%s\": no such file or directory.\r\n", dir);
							}
							write(fd, buf, strlen(buf));
						} else if(strncmp(buf, "CDUP", 4) == 0) {
							if(strlen(client[fd].cwd) == 1) {
								sprintf(buf, "250 \"/\" is current directory.\r\n");
							} else {
								//make some change to client[fd].cwd
								int i;
								char *cwd = client[fd].cwd;
								int len = strlen(cwd);
								printf("%s: %d\n", cwd, len);
								for(i = len - 1; i >= 0; i--) {
									printf("%d: %c\n", i, client[fd].cwd[i]);
									if(client[fd].cwd[i] == '/' && i != len - 1) {
										client[fd].cwd[i+1] = '\0';
										break;
									}
								}
								sprintf(buf, "250 \"%s\" is current directory.\r\n",client[fd].cwd);
							}
							write(fd, buf, strlen(buf));
						} else if (strncmp(buf, "MKD", 3) == 0) {
							char dir[128];
							sscanf(&buf[3], "%s", dir);
							if (file_exist(client[fd].cwd, dir) == 0) {
								sprintf(buf, "550 Command failed. %s exists.\r\n", dir);
								write(fd, buf, strlen(buf));
							} else {
								make_dir(client[fd].cwd, dir);
								sprintf(buf, "250 Command ok. %s made.\r\n", dir);
								write(fd, buf, strlen(buf));
							}
						} else if (strncmp(buf, "RMD", 3) == 0) {
							char dir[128];
							sscanf(&buf[3], "%s", dir);
							if (file_exist(client[fd].cwd, dir) == 0) {
								remove_dir(client[fd].cwd, dir);
								sprintf(buf, "250 Command ok. %s removed.\r\n", dir);
								write(fd, buf, strlen(buf));
							} else {
								sprintf(buf, "550 Command failed. %s doesn't exist.\r\n", dir);
								write(fd, buf, strlen(buf));
							}
						} else if (strncmp(buf, "RNFR", 4) == 0) {
							char dir[128];
							sscanf(&buf[4], "%s", dir);
							if (file_exist(client[fd].cwd, dir) == 0) {
								sscanf(&buf[4], "%s", client[fd].rnfr);
								sprintf(buf, "350 File exists, ready for destination name.\r\n");
								write(fd, buf, strlen(buf));
							} else {
								sprintf(buf, "550 File/directory not found.\r\n");
								write(fd, buf, strlen(buf));
							}
						} else if (strncmp(buf, "RNTO", 4) == 0) {
							char dir[128];
							sscanf(&buf[4], "%s", dir);
							if (file_exist(client[fd].cwd, dir) == 0) {
								sprintf(buf, "550 Comman failed. %s exists.\r\n", dir);
								write(fd, buf, strlen(buf));
							} else {
								rename_dir(client[fd].cwd, client[fd].rnfr, dir);
								sprintf(buf, "250 File rename successfully.\r\n");
								write(fd, buf, strlen(buf));
							}
						} else if(strncmp(buf, "TYPE", 4) == 0) {
							char type[10];
							sscanf(&buf[4], "%s", type);
							sprintf(buf, "200 Type set to %s.\r\n", type);
							write(fd, buf, strlen(buf));
						} else if(strncmp(buf, "PASV", 4) == 0) {
							int port = rand() % 1000 + 8000;
							client[fd].data_port = port;
							client[fd].mode = kDataConnModePassive;
							struct sockaddr_in name;
							socklen_t name_len = sizeof(struct sockaddr_in);
							getsockname(fd, (struct sockaddr*)&name, &name_len);
							printf("server ip address is : %s\n port: %d\n", inet_ntoa(name.sin_addr), port);
							sprintf(buf, "227 Entering Passive Mode (210,25,132,182,%d,%d)\r\n", port / 256, port % 256);
							write(fd, buf, strlen(buf));
							int sockfd = data_conn_passive(port);
							printf("PASV sockfd: %d\n", sockfd);
							client[fd].data_sockfd = sockfd;
						} else if(strncmp(buf, "PORT", 4) == 0) {
							int ip[4], port[2];
							sscanf(&buf[4], "%d,%d,%d,%d,%d,%d", &ip[0], &ip[1], &ip[2], &ip[3], &port[0], &port[1]);
							sprintf(client[fd].data_ip, "%d.%d.%d.%d", ip[0], ip[1], ip[2], ip[3]);
							client[fd].data_port = port[0] * 256 + port[1];
							client[fd].mode = kDataConnModeActive;
							printf("IP:%s, Port:%d\n", client[fd].data_ip, client[fd].data_port);
							sprintf(buf, "200 Port command successful.\r\n");
							write(fd, buf, strlen(buf));
						} else if(strncmp(buf, "LIST", 4) == 0 || strncmp(buf, "NLST", 4) == 0) {
							int sockfd = -1; /* -1 marks "no data connection yet" */
							if(client[fd].mode == kDataConnModeActive) {
								sockfd = data_conn_active(&client[fd].sin_addr, client[fd].data_port);
							} else if (client[fd].mode == kDataConnModePassive) {
								//sockfd = data_conn_passive(client[fd].data_port);
								sockfd = client[fd].data_sockfd;
							}
							int result = 0;
							if (sockfd != -1) {
								sprintf(buf,"150 Opening data connection for directory list.\r\n");
								write(fd, buf, strlen(buf));
								if(send_list(sockfd, client[fd].cwd) == 0) {
									sprintf(buf, "226 Transfer ok.\r\n");
								} else {
									sprintf(buf, "550 Error encountered.\r\n");
								}
								write(fd, buf, strlen(buf));
								close(sockfd);
							} else {
								printf("CREATE DATA_CONN FAILE.\n");
							}
						} else if (strncmp(buf, "RETR", 4) == 0) {
							char filename[64];
							sscanf(&buf[4], "%s", filename);
							if(file_exist(client[fd].cwd, filename) != 0) {
								sprintf(buf, "550 \"%s\": no such file.\r\n", filename);
								write(fd, buf, strlen(buf));
							} else {
								int sockfd = -1;
								if(client[fd].mode == kDataConnModeActive) {
									sockfd = data_conn_active(&client[fd].sin_addr, client[fd].data_port);
								} else if (client[fd].mode == kDataConnModePassive) {
									sockfd = data_conn_passive(client[fd].data_port);
								}
								int result = 0;
								if (sockfd != -1) {
									sprintf(buf, "150 Opening data connection for %s\r\n", filename);
									write(fd, buf, strlen(buf));
									char filedir[128];
									int len = strlen(client[fd].cwd);
									if(client[fd].cwd[len-1] == '/') {
										sprintf(filedir, "%s%s%s", HOME, client[fd].cwd, filename);
									} else {
										sprintf(filedir, "%s%s/%s", HOME, client[fd].cwd, filename);
									}	
									result = send_file(sockfd, filedir);
									printf("send result: %d\n", result);
									if (result != -1) {
										close(sockfd);
										sprintf(buf, "226 File sent ok.\r\n");
										write(fd, buf, strlen(buf));
									}
								}
							}
						} else if (strncmp(buf, "STOR", 4) == 0) {
							char filename[64];
							sscanf(&buf[4], "%s", filename);
							int sockfd = -1;
							if(client[fd].mode == kDataConnModeActive) {
								sockfd = data_conn_active(&client[fd].sin_addr, client[fd].data_port);
							} else if (client[fd].mode == kDataConnModePassive) {
								sockfd = data_conn_passive(client[fd].data_port);
							}
							int result = 0;
							if (sockfd != -1) {
								sprintf(buf, "150 Opening data connection for %s\r\n", filename);
								write(fd, buf, strlen(buf));
								char filedir[128];
								int len = strlen(client[fd].cwd);
								if(client[fd].cwd[len-1] == '/') {
									sprintf(filedir, "%s%s%s", HOME, client[fd].cwd, filename);
								} else {
									sprintf(filedir, "%s%s/%s", HOME, client[fd].cwd, filename);
								}	
								result = recv_file(sockfd, /*filename*/filedir);
								printf("recv result: %d\n", result);
								if (result != -1) {
									close(sockfd);
									sprintf(buf, "226 File received ok.\r\n");
									write(fd, buf, strlen(buf));
								}
							}
						} else if (strncmp(buf, "QUIT", 4) == 0) {
							sprintf(buf, "221 Goodbye.\r\n");
							write(fd, buf, strlen(buf));
							close(fd);
							memset(&client[fd], 0, sizeof(struct Client));
							FD_CLR(fd, &readfds);
							printf("removing client on fd %d\n", fd);
						} else {
							sprintf(buf, "550 Unknown command.\r\n");
							write(fd, buf, strlen(buf));
						}
					}
				}
			}
		}
	}
	return 0;
}
Code Example #14
File: aggregator.cpp Project: nexcafe/multiverso
    void Aggregator::Send(int id, zmq::socket_t* socket)
    {
        int src_rank = Multiverso::ProcessRank();
        int num_server = Multiverso::TotalServerCount();

        std::vector<MsgPack*> send_list(num_server, nullptr);
        std::vector<int> send_ret_size(num_server, 0);

        for (int table_id = 0; table_id < tables_.size(); ++table_id)
        {
            Table* table = tables_[table_id];
            TableIterator iter(*table);

            for (; iter.HasNext(); iter.Next())
            {
                integer_t row_id = iter.RowId();
                if (row_id % num_threads_ != id)
                {
                    continue;
                }
                RowBase* row = iter.Row();

                int dst_rank = (table_id + row_id) % num_server;
                if (send_list[dst_rank] == nullptr)
                {
                    send_list[dst_rank] = new MsgPack(MsgType::Add,
                        MsgArrow::Worker2Server, src_rank, dst_rank);
                    send_ret_size[dst_rank] = 0;
                }
                // Format: table_id, row_id, number
                //         col_1, col2, ..., col_n, val_1, val_2, ..., val_n;
                int msg_size = 3 * sizeof(integer_t) + row->NonzeroSize() *
                    (table->ElementSize() + sizeof(integer_t));
                if (msg_size > kMaxMsgSize) 
                {
                    // TODO(feiga): we currently assume the serialized row
                    // size does not exceed kMaxMsgSize; should fix later.
                    Log::Error("Row size exceeds the max size of message\n");
                }
                if (send_ret_size[dst_rank] + msg_size > kMaxMsgSize)
                {
                    send_list[dst_rank]->Send(socket);
                    delete send_list[dst_rank];
                    send_list[dst_rank] = new MsgPack(MsgType::Add,
                        MsgArrow::Worker2Server, src_rank, dst_rank);
                    send_ret_size[dst_rank] = 0;
                }
                zmq::message_t* msg = new zmq::message_t(msg_size);
                integer_t* buffer = static_cast<integer_t*>(msg->data());
                buffer[0] = table_id;
                buffer[1] = row_id;
                row->Serialize(buffer + 2);
                send_list[dst_rank]->Push(msg);
                send_ret_size[dst_rank] += msg_size;
            }
        }
        for (int i = 0; i < num_server; ++i)
        {
            if (send_ret_size[i] > 0)
            {
                send_list[i]->Send(socket);
                delete send_list[i];
            }
        }
    }
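
The Format comment above pins down the wire layout of one Add entry. A hypothetical receiver for the fixed header (a sketch; only the layout comes from the code, the function itself is an assumption):

    void DecodeAddEntry(zmq::message_t* msg)
    {
        integer_t* buffer = static_cast<integer_t*>(msg->data());
        integer_t table_id = buffer[0];
        integer_t row_id = buffer[1];
        integer_t number = buffer[2];  // count of nonzero elements
        // The columns start at buffer + 3; the values follow the
        // columns, and their width is the table's ElementSize().
    }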
Code Example #15
/*
	This function handles all initial messages from the user to the rendezvous server.
	It will:
	- List the groups currently active
	- Allow the user to quit
	- Put a user in a particular group
*/
void *handle_request(void *hrs){
	struct handle_request_struct *args = (struct handle_request_struct *) hrs;
	printf("handle IP: %s\n",args->IP);
	int socket = args->socketfd;
	while(1){
		printf("In handle_request while(1)\n");

		//Get the size of the message from the user
		int recv_bytes;
		read_socket(socket,&recv_bytes,sizeof(int));
		recv_bytes = ntohl(recv_bytes);

		//If it's zero, it means there's no message
		if(recv_bytes == 0) continue;
		printf("recv_bytes = %d\n",recv_bytes);
		char request_message[recv_bytes];
		memset(request_message,0,recv_bytes);

		printf("Message before: %s\n",request_message);

		//Get the message from the user
		read_socket(socket,request_message,recv_bytes);

		printf("Message after: %s\n",request_message);


		//The client is requesting a list of the groups that exist
		if(strcmp(request_message,"L") == 0)
			send_list(socket);

		//The client is quitting the session and will disconnect.
		else if(strcmp(request_message,"Q") == 0){
			printf("Thread exiting...\n");
			pthread_exit(NULL);
		}
		// This is a join request
		else if(request_message[0] == 'J'){
			char info[recv_bytes];
			slice_string(info,request_message,1);
			printf("Sliced string: %s\n",info);
			char *gname = strtok(info,":");
			char *uname = strtok(NULL,":");

			/*struct user u;
			init_user(&u);
			strcpy(u.IP,args->IP);
			strcpy(u.username,uname);*/
			// Check if group exists
			int index = group_index(gname);
			int userIndex;
			if(index == -1){  // Group doesn't exist, so we need to create it:

				userIndex = 0;
				index = num_groups;
				printf("Creating group...\n");
				all_groups[index] = (struct chat_group*) malloc (sizeof(struct chat_group));
				strcpy(all_groups[index]->groupname,gname);
				all_groups[index]->users[0] = (struct user*) malloc (sizeof(struct user));

				strcpy(all_groups[index]->users[0]->IP, args->IP);
				printf("From handle_request: IP: %s\n",all_groups[num_groups]->users[0]->IP);
				strcpy(all_groups[index]->users[0]->username, uname);
				all_groups[index]->num_users = 1;
				num_groups ++;
				printf("Group created!\n");
			}
			// Group exists, so just add the user to the group
			else {
				userIndex = all_groups[index]->num_users;
				all_groups[index]->users[all_groups[index]->num_users] = (struct user*) malloc (sizeof(struct user));
				strcpy(all_groups[index]->users[all_groups[index]->num_users]->IP, args->IP);
				strcpy(all_groups[index]->users[all_groups[index]->num_users]->username, uname);
				all_groups[index]->num_users++;
			}

			all_groups[index]->users[userIndex]->socket = socket;
			send_group(index,args->socketfd);

			// Generate a thread that handles in-group requests, like leaving and member-update requests
			//pthread_t in_group_requests;
			pthread_create(&all_groups[index]->users[userIndex]->in_group_req_thread,NULL,handle_in_group_requests,(void *) args->socketfd);
			pthread_exit(NULL);
			return 0;	// In case thread exit didn't work
		}

		//pthread_exit(NULL);
	}
	pthread_exit(NULL);
}
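
slice_string is used above to drop the leading 'J' from a join request before splitting on ':'; its definition is not shown. A plausible one-liner (a sketch, assuming it simply copies from the given offset to the end):

/* Sketch: copy src from the given offset onward into dst. */
void slice_string(char *dst, const char *src, int offset){
	strcpy(dst, src + offset);
}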