예제 #1
0
void FeatureFloodCount::communicateAndMerge()
{
  // First we need to transform the raw flood data into a usable data structure
  populateDataStructuresFromFloodData();

  /*********************************************************************************
   * Begin Parallel Communication Section
   *********************************************************************************/

  /**
   * The libMesh packed range routines handle the communication of the individual
   * string buffers. Here we need to create a container to hold our type
   * to serialize. It'll always be size one because we are sending a single
   * byte stream of all the data to other processors. The stream need not be
   * the same size on all processors.
   */
  std::vector<std::string> send_buffers(1);

  /**
   * Additionally we need to create a different container to hold the received
   * byte buffers. The container type need not match the send container type.
   * However, we do know the number of incoming buffers (num processors) so we'll
   * go ahead and use a vector.
   */
  std::vector<std::string> recv_buffers;
  recv_buffers.reserve(_app.n_processors());

  serialize(send_buffers[0]);

  /**
   * Each processor needs information from all other processors to create a complete
   * global feature map.
   */
  // nullptr (not NULL) for the unused context pointer, consistent with the
  // rest of the codebase and modern C++.
  _communicator.allgather_packed_range((void *)(nullptr), send_buffers.begin(),
                                       send_buffers.end(), std::back_inserter(recv_buffers));

  deserialize(recv_buffers);

  /*********************************************************************************
   * End Parallel Communication Section
   *********************************************************************************/

  // We'll inflate the bounding boxes by a percentage of the domain width so that
  // nearby features discovered on different processors overlap and can be merged.
  RealVectorValue inflation;
  for (unsigned int i = 0; i < LIBMESH_DIM; ++i)
    inflation(i) = _mesh.dimensionWidth(i);

  // Let's try 1% of the domain width in each dimension
  inflation *= 0.01;
  inflateBoundingBoxes(inflation);

  mergeSets(true);
}
예제 #2
0
void FeatureFloodCount::communicateAndMerge()
{
  // Convert the raw flood data into the structures we ship between ranks.
  prepareDataForTransfer();

  // ---- Parallel communication -------------------------------------------------

  // The libMesh packed-range routines communicate a set of string buffers. We
  // always send exactly one: a single byte stream holding all of this rank's
  // data (streams may differ in length from rank to rank).
  std::vector<std::string> outgoing(1);

  // One incoming buffer arrives per processor, so reserve up front. The
  // receive container type is independent of the send container type.
  std::vector<std::string> incoming;
  incoming.reserve(_app.n_processors());

  serialize(outgoing[0]);

  // Every rank needs every other rank's data to assemble the complete global
  // feature map.
  _communicator.allgather_packed_range((void *)(nullptr), outgoing.begin(), outgoing.end(),
                                       std::back_inserter(incoming));

  deserialize(incoming);

  // ---- End parallel communication ---------------------------------------------

  mergeSets(true);
}
예제 #3
0
void FeatureFloodCount::communicateAndMerge()
{
  // Convert the raw flood data into the structures we ship between ranks.
  prepareDataForTransfer();

  // One outgoing buffer: a single byte stream holding all of this rank's data.
  std::vector<std::string> outgoing(1);

  // Only the root rank gathers, so only it needs room for every rank's buffer.
  std::vector<std::string> incoming;
  if (_is_master)
    incoming.reserve(_app.n_processors());

  serialize(outgoing[0]);

  // Release as much memory as possible before the heavy global communication.
  clearDataStructures();

  // Funnel every rank's byte stream to rank 0, which builds the complete
  // global feature map.
  _communicator.gather_packed_range(0, (void *)(nullptr), outgoing.begin(), outgoing.end(),
                                    std::back_inserter(incoming));

  if (_is_master)
  {
    // The root deserializes everything and merges the global picture.
    deserialize(incoming);
    incoming.clear();

    mergeSets(true);
  }

  // Every rank needs to know the final feature count.
  _communicator.broadcast(_feature_count);
}
예제 #4
0
void
SlopeReconstructionBase::finalize()
{
  ElementLoopUserObject::finalize();

  // Nothing to exchange in a serial run.
  if (_app.n_processors() <= 1)
    return;

  _side_geoinfo_cached = true;

  // One outgoing byte stream from this rank; one incoming buffer per rank.
  std::vector<std::string> outgoing(1);
  std::vector<std::string> incoming;
  incoming.reserve(_app.n_processors());

  serialize(outgoing[0]);
  comm().allgather_packed_range(
      (void *)(nullptr), outgoing.begin(), outgoing.end(), std::back_inserter(incoming));
  deserialize(incoming);
}
/*
 * Handle an ack charLpEvent.
 *
 * Records the acknowledged correlation token for the originating virtual
 * device and, if that port has buffered output pending, retries sending it
 * (presumably the ack opens send window — confirm against send_buffers()).
 */
static void vioHandleAck(struct HvLpEvent *event)
{
	struct viocharlpevent *cevent = (struct viocharlpevent *)event;
	unsigned long flags;
	u8 port = cevent->virtual_device;

	/* Ignore acks addressed to a virtual device outside the valid range. */
	if (port >= VTTY_PORTS) {
		printk(VIOCONS_KERN_WARN "data on invalid virtual device\n");
		return;
	}

	/* Update the last-acked token under the console lock. */
	spin_lock_irqsave(&consolelock, flags);
	port_info[port].ack = event->xCorrelationToken;
	spin_unlock_irqrestore(&consolelock, flags);

	/* If this port still has queued output, try to flush it now. */
	if (port_info[port].used)
		send_buffers(&port_info[port]);
}
예제 #6
0
/*
 * Event callback: data is ready to be read from the proxied upstream on
 * `conn`. Reads into a fresh buffer chain, advances the connection's status,
 * and once the response header has been written, forwards the buffered data
 * to the paired client fd. Always returns 0.
 *
 * NOTE(review): conn->buf is overwritten with a newly allocated chain on
 * every invocation without an explicit release of the old one here — confirm
 * the pool manager reclaims previous chains, otherwise this leaks per event.
 */
int proxy_data_coming(event_t *ev, void *arg)
{
    request_t *req;
    connection_t *conn = (connection_t *)arg;
    /* Fresh chain for this read, sized by the default upstream buffer size. */
    conn->buf = lt_new_buffer_chain(conn->buf_pool_manager,
            DEFAULT_UPSTREAM_BUFFER_SIZE);
    int rv = lt_recv(conn->fd, conn->buf);//like nginx, only part of the data may arrive per read
    if (rv == LAGAIN) {
        /* Partial read: keep waiting for the rest of the response. */
        conn->status = L_PROXY_WAITING_RESPONSE;
    } else if (rv == LCLOSE) {
        debug_print("%s", "proxy FD CLOSE\n");
        conn->status = L_PROXY_CLOSING;
    } else if (rv == LERROR) {
        conn->status = L_PROXY_ERROR;
    }

    if (conn->status == L_PROXY_WAITING_RESPONSE) {
        /* Parse the status line of the upstream response just received. */
        req = http_create_request(conn);
        http_process_response_line(conn, req);

        debug_print("%s", "SUCCESS parse response\n");
        //send_chains(ev->base, conn->pair->fd, &chain);
//        http_process_response_line(conn, req);
//        http_process_request_line(conn, req);
    } 

    if (conn->status == L_HTTP_WROTE_RESPONSE_HEADER) {
//      if (conn->chunked) {
        /* Header already sent to the client; relay the buffered body. */
        int client_fd = conn->pair->fd;
        send_buffers(conn->ev->base, client_fd, conn->buf);
//optional
//A: splice2 in splice2 out
//B: lt_recv() in vmsplice2 out
//C: 
        //http_check_chunked(conn->buf);//TODO
//      }
//        conn->handler(conn, conn->handler_arg);
    }
    //send_chains(ev->base, conn->fd, <#lt_chain_t *#>)
    return 0;
}
예제 #7
0
void
RestartableTypesChecker::execute()
{
  // First we need to check that the data has been restored properly
  checkData();

  /**
   * For testing the packed range routines, we'll make sure that we can pack up several types
   * into a single string and send it as a packed range and successfully restore it.
   */

  // Buffers for parallel communication (one outgoing stream; one incoming per rank)
  std::vector<std::string> send_buffers(1);
  std::vector<std::string> recv_buffers;

  // String streams for serialization and deserialization
  std::ostringstream oss;
  std::istringstream iss;

  /**
   * Serialize. Every dataStore() here must have a matching dataLoad() below,
   * in the same order, or deserialization will read from the wrong offsets.
   */
  dataStore(oss, _real_data, this);
  dataStore(oss, _vector_data, this);
  dataStore(oss, _vector_vector_data, this);
  dataStore(oss, _pointer_data, this);
  dataStore(oss, _custom_data, this);
  dataStore(oss, _set_data, this);
  dataStore(oss, _map_data, this);
  dataStore(oss, _dense_vector_data, this);
  dataStore(oss, _dense_matrix_data, this);

  send_buffers[0] = oss.str();

  /**
   * Communicate: gather every rank's stream onto every rank.
   */
  recv_buffers.reserve(_app.n_processors());
  _communicator.allgather_packed_range(
      (void *)(nullptr), send_buffers.begin(), send_buffers.end(), std::back_inserter(recv_buffers));

  if (recv_buffers.size() != _app.n_processors())
    mooseError("Error in sizes of communicated buffers");

  /**
   * Deserialize each received stream and verify the restored values.
   */
  for (const auto & buffer : recv_buffers)
  {
    iss.str(buffer);
    // reset the stream state
    iss.clear();

    // Clear types (just to make sure we don't get any false positives in our testing)
    clearTypes();

    // Load the values in exactly the order they were stored above.
    // (An unmatched dataLoad(iss, *this, this) was removed: nothing
    // corresponding was ever stored, so it read past the end of the stream.)
    dataLoad(iss, _real_data, this);
    dataLoad(iss, _vector_data, this);
    dataLoad(iss, _vector_vector_data, this);
    dataLoad(iss, _pointer_data, this);
    dataLoad(iss, _custom_data, this);
    dataLoad(iss, _set_data, this);
    dataLoad(iss, _map_data, this);
    dataLoad(iss, _dense_vector_data, this);
    dataLoad(iss, _dense_matrix_data, this);

    // Finally confirm that the data is sane
    checkData();
  }
}
/*
 * Our internal writer.  Gets called both from the console device and
 * the tty device.  the tty pointer will be NULL if called from the console.
 * Return total number of bytes "written".
 *
 * NOTE: Don't use printk in here because it gets nastily recursive.  hvlog
 * can be used to log to the hypervisor buffer
 */
static int internal_write(struct port_info *pi, const char *buf,
			  size_t len, struct viocharlpevent *viochar)
{
	HvLpEvent_Rc hvrc;
	size_t bleft;	/* bytes not yet sent */
	size_t curlen;	/* size of the chunk being sent this iteration */
	const char *curbuf;
	unsigned long flags;
	/* If no event was passed in, we must allocate one and copy data into it. */
	int copy_needed = (viochar == NULL);

	/*
	 * Write to the hvlog of inbound data are now done prior to
	 * calling internal_write() since internal_write() is only called in
	 * the event that an lp event path is active, which isn't the case for
	 * logging attempts prior to console initialization.
	 *
	 * If there is already data queued for this port, send it prior to
	 * attempting to send any new data, so output stays in order.
	 */
	if (pi->used)
		send_buffers(pi);

	spin_lock_irqsave(&consolelock, flags);

	/*
	 * If the internal_write() was passed a pointer to a
	 * viocharlpevent then we don't need to allocate a new one
	 * (this is the case where we are internal_writing user space
	 * data).  If we aren't writing user space data then we need
	 * to get an event from viopath.
	 */
	if (copy_needed) {
		/* This one is fetched from the viopath data structure */
		viochar = (struct viocharlpevent *)
			vio_get_event_buffer(viomajorsubtype_chario);
		/* Make sure we got a buffer */
		if (viochar == NULL) {
			spin_unlock_irqrestore(&consolelock, flags);
			hvlog("\n\rviocons: Can't get viochar buffer in internal_write().");
			return -EAGAIN;
		}
		initDataEvent(viochar, pi->lp);
	}

	curbuf = buf;
	bleft = len;

	/*
	 * Send in VIOCHAR_MAX_DATA-sized chunks while data remains, nothing
	 * is queued ahead of us, and the unacked-event window is open.
	 */
	while ((bleft > 0) && (pi->used == 0) &&
	       ((pi->seq - pi->ack) < VIOCHAR_WINDOW)) {
		if (bleft > VIOCHAR_MAX_DATA)
			curlen = VIOCHAR_MAX_DATA;
		else
			curlen = bleft;

		/* Tag the event so the ack handler can match it to this port. */
		viochar->event.xCorrelationToken = pi->seq++;

		if (copy_needed) {
			memcpy(viochar->data, curbuf, curlen);
			viochar->len = curlen;
		}

		viochar->event.xSizeMinus1 =
		    offsetof(struct viocharlpevent, data) + curlen;

		hvrc = HvCallEvent_signalLpEvent(&viochar->event);
		if (hvrc) {
			/* Send failed: release lock/buffer and report partial count. */
			spin_unlock_irqrestore(&consolelock, flags);
			if (copy_needed)
				vio_free_event_buffer(viomajorsubtype_chario, viochar);

			hvlog("viocons: error sending event! %d\n", (int)hvrc);
			return len - bleft;
		}

		curbuf += curlen;
		bleft -= curlen;
	}

	/* If we didn't send it all, buffer as much of it as we can. */
	if (bleft > 0)
		bleft -= buffer_add(pi, curbuf, bleft);
	/*
	 * Since we grabbed it from the viopath data structure, return
	 * it to the data structure.
	 */
	if (copy_needed)
		vio_free_event_buffer(viomajorsubtype_chario, viochar);
	spin_unlock_irqrestore(&consolelock, flags);

	return len - bleft;
}