Example #1
	void MemoryBufferForQuery::LoadLexicon(istream& ifs){
		string line;
		vector<string> vct;
		vct.reserve(2);
		P_INFO("Loading Lexicon...");
		int nline = 0;
		while(ifs){
			line = "";
			getline(ifs, line);
			split(vct,line,is_any_of(" "),token_compress_on);
			if(vct.size()<2){
				continue;
			}
			size_t lineno = lexical_cast<size_t>(vct[0]);
			if(lineno < (size_t) nline + 1){
				P_FATAL("Mismatch (duplicate) line number, line (%d) \"%s\"",nline+1, line.c_str());
			}else if(lineno > (size_t) nline + 1){
				P_ERROR("Mismatch (extra) line number, line (%d) \"%s\"",nline+1, line.c_str());
				while(lexicon.size() < lineno-1){
					lexicon.push_back(string());
				}
			}
			lexicon.push_back(vct[1]);
			dict.insert(DictType::value_type(vct[1],lineno));
			nline = lineno;
		}
		P_INFO("Loaded %d entries in lexicon", nline);
	}
Example #2
File: monreader.c  Project: 274914765/C
/******************************************************************************
 *                              module init/exit                              *
 *****************************************************************************/
static int __init mon_init(void)
{
    int rc;

    if (!MACHINE_IS_VM) {
        P_ERROR("not running under z/VM, driver not loaded\n");
        return -ENODEV;
    }

    /*
     * Register with IUCV and connect to *MONITOR service
     */
    rc = iucv_register(&monreader_iucv_handler, 1);
    if (rc) {
        P_ERROR("failed to register with iucv driver\n");
        return rc;
    }
    P_INFO("open, registered with IUCV\n");

    rc = segment_type(mon_dcss_name);
    if (rc < 0) {
        segment_warning(rc, mon_dcss_name);
        goto out_iucv;
    }
    if (rc != SEG_TYPE_SC) {
        P_ERROR("segment %s has unsupported type, should be SC\n",
            mon_dcss_name);
        rc = -EINVAL;
        goto out_iucv;
    }

    rc = segment_load(mon_dcss_name, SEGMENT_SHARED,
              &mon_dcss_start, &mon_dcss_end);
    if (rc < 0) {
        segment_warning(rc, mon_dcss_name);
        rc = -EINVAL;
        goto out_iucv;
    }
    dcss_mkname(mon_dcss_name, &user_data_connect[8]);

    rc = misc_register(&mon_dev);
    if (rc < 0) {
        P_ERROR("misc_register failed, rc = %i\n", rc);
        goto out;
    }
    P_INFO("Loaded segment %s from %p to %p, size = %lu Byte\n",
        mon_dcss_name, (void *) mon_dcss_start, (void *) mon_dcss_end,
        mon_dcss_end - mon_dcss_start + 1);
    return 0;

out:
    segment_unload(mon_dcss_name);
out_iucv:
    iucv_unregister(&monreader_iucv_handler, 1);
    return rc;
}
Example #3
/*! 
 * \brief Wait for all send/receives of bufinfo to complete
 * \param[in] node address of CommQueue node
 * \return Return Code
 *
 * This routine is to be executed during the ::MB_COMM_OLD_BUFINFO_SENT stage.
 * 
 * Steps:
 * -# if node->pending_in != 0, MPI_Testall() receives
 *  - if completed, set node->pending_in = 0
 * -# if node->pending_out != 0, MPI_Testall() sends
 *  - if completed, set node->pending_out = 0
 * -# if node->pending_in == 0 and node->pending_out == 0
 *  - set node->stage = MB_COMM_OLD_PRE_PROPAGATION
 * 
 * Post:
 * - if node->pending_in == 0 and node->pending_out == 0
 *  - node->stage == MB_COMM_OLD_PRE_PROPAGATION
 * - else
 *  - node->stage == MB_COMM_OLD_BUFINFO_SENT
 */ 
int MBI_CommRoutine_OLD_WaitBufInfo(struct MBIt_commqueue *node) {
    
    int rc, flag;
    
    assert(node->stage == MB_COMM_OLD_BUFINFO_SENT); 
    assert(node->outcount != NULL);
    assert(node->incount  != NULL);
    assert(node->sendreq  != NULL);
    assert(node->recvreq  != NULL);
    assert(node->board != NULL);
    
    /* check receives */
    if (node->pending_in != 0)
    {
        rc = MPI_Testall(MBI_CommSize, node->recvreq, &flag, MPI_STATUSES_IGNORE);
        assert(rc == MPI_SUCCESS);
        if (rc != MPI_SUCCESS) return MB_ERR_MPI;
        
        if (flag)
        {
            P_INFO("COMM: (Board %d) all buffer sizes received", node->mb);
            node->pending_in = 0;
        }
    }
    
    /* check sends */
    if (node->pending_out != 0)
    {
        rc = MPI_Testall(MBI_CommSize, node->sendreq, &flag, MPI_STATUSES_IGNORE);
        assert(rc == MPI_SUCCESS);
        if (rc != MPI_SUCCESS) return MB_ERR_MPI;
        
        if (flag) 
        {
            P_INFO("COMM: (Board %d) all buffer sizes sent", node->mb);
            node->pending_out = 0;
        }
    }
    
    /* if all done, move on to next stage */
    if (node->pending_in == 0 && node->pending_out == 0)
    {
        P_INFO("COMM: (Board %d) moving to MB_COMM_OLD_PRE_PROPAGATION stage", node->mb);
        node->stage = MB_COMM_OLD_PRE_PROPAGATION;
    }

    return MB_SUCCESS;
}
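
The steps above boil down to a standard non-blocking MPI pattern: keep a set of requests, poll it with MPI_Testall(), and clear a pending counter once the whole set has completed. Below is a minimal, self-contained sketch of that pattern only; MBIt_commqueue, the MB_COMM_OLD_* stages and the P_INFO macro belong to libmboard and are not reproduced here.

/* Sketch: post non-blocking sends/receives, then poll the whole request set
 * with MPI_Testall() until it completes (analogous to clearing
 * node->pending_in / node->pending_out above). */
#include <mpi.h>
#include <stdio.h>
#include <stdlib.h>

int main(int argc, char **argv)
{
    int rank, size, i, flag, pending = 1;
    int send_val, *recv_vals;
    MPI_Request *req;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    send_val  = rank;
    recv_vals = malloc(sizeof(int) * size);
    req       = malloc(sizeof(MPI_Request) * 2 * size);

    /* one receive and one send per peer (self included, for simplicity) */
    for (i = 0; i < size; i++) {
        MPI_Irecv(&recv_vals[i], 1, MPI_INT, i, 0, MPI_COMM_WORLD, &req[i]);
        MPI_Isend(&send_val,     1, MPI_INT, i, 0, MPI_COMM_WORLD, &req[size + i]);
    }

    /* poll until every request in the set has completed */
    while (pending) {
        MPI_Testall(2 * size, req, &flag, MPI_STATUSES_IGNORE);
        if (flag)
            pending = 0;   /* all sends/receives done */
    }

    if (rank == 0)
        printf("all sends/receives completed\n");

    free(recv_vals);
    free(req);
    MPI_Finalize();
    return 0;
}
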
Example #4
File: monreader.c  Project: 274914765/C
static int mon_close(struct inode *inode, struct file *filp)
{
    int rc, i;
    struct mon_private *monpriv = filp->private_data;

    /*
     * Close IUCV connection and unregister
     */
    rc = iucv_path_sever(monpriv->path, user_data_sever);
    if (rc)
        P_ERROR("close, iucv_sever failed with rc = %i\n", rc);
    else
        P_INFO("close, terminated connection to *MONITOR service\n");

    atomic_set(&monpriv->iucv_severed, 0);
    atomic_set(&monpriv->iucv_connected, 0);
    atomic_set(&monpriv->read_ready, 0);
    atomic_set(&monpriv->msglim_count, 0);
    monpriv->write_index  = 0;
    monpriv->read_index   = 0;

    for (i = 0; i < MON_MSGLIM; i++)
        kfree(monpriv->msg_array[i]);
    kfree(monpriv);
    clear_bit(MON_IN_USE, &mon_in_use);
    return 0;
}
Example #5
	//////////////////////////////////////////////////////////////////////////
	// Main API functions
	InvertedDocument::InvertedDocument(bool index, const char* root, int splits , int buffer_size)
		:totalSplits(splits), rootDir(root), indexMode(index),rootDirPath(root), buffer(buffer_size),
		totalDocuments(0){
			corpusMaster = chunkRelink = corpusSentMaster = NULL;
			if(exists(rootDirPath)){ // Check directory exist
				if(indexMode){
					P_FATAL("The directory for indexing \"%s\" already exists! Cowardly refuse to overwrite it. Quitting",rootDir.c_str());
				}
				boost::filesystem::path rel = rootDirPath / "maxsplit";
				ifstream ifs(rel.string().c_str());
				if(!ifs){
					P_FATAL("The directory does not contain required maxsplit file. Quitting");
				}
				ifs >> totalSplits;
				ifs.close();
				if(totalSplits != splits){
					P_INFO("The directory shows the number of splits is %d instead of specified (or default) %d",totalSplits,splits);
				}
				rel = rootDirPath / "dict";
				ifstream idic(rel.string().c_str());
				C_FATAL(idic);
				qbuffer.LoadLexicon(idic);
				idic.close();
				chunkRelink = OpenFile("crl");
				C_FATAL(chunkRelink);
				qbuffer.LoadChunkRelink(chunkRelink);
				fclose(chunkRelink);
				chunkRelink = NULL;
				corpusMaster = OpenFile("corpus/master");
				C_FATAL(corpusMaster);
				qbuffer.LoadCorpusBounds(corpusMaster);
				fclose(corpusMaster);
			}else{
				if(!indexMode){
Example #6
File: monreader.c  Project: 274914765/C
/******************************************************************************
 *                               file operations                              *
 *****************************************************************************/
static int mon_open(struct inode *inode, struct file *filp)
{
    struct mon_private *monpriv;
    int rc;

    /*
     * only one user allowed
     */
    rc = -EBUSY;
    if (test_and_set_bit(MON_IN_USE, &mon_in_use))
        goto out;

    rc = -ENOMEM;
    monpriv = mon_alloc_mem();
    if (!monpriv)
        goto out_use;

    /*
     * Connect to *MONITOR service
     */
    monpriv->path = iucv_path_alloc(MON_MSGLIM, IUCV_IPRMDATA, GFP_KERNEL);
    if (!monpriv->path)
        goto out_priv;
    rc = iucv_path_connect(monpriv->path, &monreader_iucv_handler,
                   MON_SERVICE, NULL, user_data_connect, monpriv);
    if (rc) {
        P_ERROR("iucv connection to *MONITOR failed with "
            "IPUSER SEVER code = %i\n", rc);
        rc = -EIO;
        goto out_path;
    }
    /*
     * Wait for connection confirmation
     */
    wait_event(mon_conn_wait_queue,
           atomic_read(&monpriv->iucv_connected) ||
           atomic_read(&monpriv->iucv_severed));
    if (atomic_read(&monpriv->iucv_severed)) {
        atomic_set(&monpriv->iucv_severed, 0);
        atomic_set(&monpriv->iucv_connected, 0);
        rc = -EIO;
        goto out_path;
    }
    P_INFO("open, established connection to *MONITOR service\n\n");
    filp->private_data = monpriv;
    return nonseekable_open(inode, filp);

out_path:
    kfree(monpriv->path);
out_priv:
    mon_free_mem(monpriv);
out_use:
    clear_bit(MON_IN_USE, &mon_in_use);
out:
    return rc;
}
Example #7
static int cu_tail_reopen(cu_tail_t *obj) {
  int seek_end = 0;
  struct stat stat_buf = {0};

  int status = stat(obj->file, &stat_buf);
  if (status != 0) {
    P_ERROR("utils_tail: stat (%s) failed: %s", obj->file, STRERRNO);
    return -1;
  }

  /* The file is already open.. */
  if ((obj->fh != NULL) && (stat_buf.st_ino == obj->stat.st_ino)) {
    /* Seek to the beginning if file was truncated */
    if (stat_buf.st_size < obj->stat.st_size) {
      P_INFO("utils_tail: File `%s' was truncated.", obj->file);
      status = fseek(obj->fh, 0, SEEK_SET);
      if (status != 0) {
        P_ERROR("utils_tail: fseek (%s) failed: %s", obj->file, STRERRNO);
        fclose(obj->fh);
        obj->fh = NULL;
        return -1;
      }
    }
    memcpy(&obj->stat, &stat_buf, sizeof(struct stat));
    return 1;
  }

  /* Seek to the end if we re-open the same file again or the file opened
   * is the first at all or the first after an error */
  if ((obj->stat.st_ino == 0) || (obj->stat.st_ino == stat_buf.st_ino))
    seek_end = 1;

  FILE *fh = fopen(obj->file, "r");
  if (fh == NULL) {
    P_ERROR("utils_tail: fopen (%s) failed: %s", obj->file, STRERRNO);
    return -1;
  }

  if (seek_end != 0) {
    status = fseek(fh, 0, SEEK_END);
    if (status != 0) {
      P_ERROR("utils_tail: fseek (%s) failed: %s", obj->file, STRERRNO);
      fclose(fh);
      return -1;
    }
  }

  if (obj->fh != NULL)
    fclose(obj->fh);
  obj->fh = fh;
  memcpy(&obj->stat, &stat_buf, sizeof(struct stat));

  return 0;
} /* int cu_tail_reopen */
Example #8
/*!
 * \brief Deletes an Iterator
 * \ingroup MB_API
 * \param[in,out] itr_ptr Address of Iterator Handle
 * 
 * Upon successful removal of the reference to the Iterator from the 
 * ::MBI_OM_iterator ObjectMap, we first delete the pooled-list associated 
 * with the Iterator and then deallocate the Iterator object.
 * 
 * \note It is valid to delete a null Iterator (::MB_NULL_ITERATOR). The routine
 * will return immediately with ::MB_SUCCESS.
 * 
 * Possible return codes:
 *  - ::MB_SUCCESS 
 *  - ::MB_ERR_INVALID (invalid Iterator given) 
 *  - ::MB_ERR_INTERNAL (possible bug. Recompile and run in debug mode for hints)
 */
int MB_Iterator_Delete(MBt_Iterator *itr_ptr) {
	
    int rc;
    MBIt_Iterator *iter;

    /* nothing to do for null iterator */
    if (*itr_ptr == MB_NULL_ITERATOR)
    {
        P_WARNING("Deletion of null iterator (MB_NULL_ITERATOR)");
        return MB_SUCCESS;
    }
    
    /* pop iterator from object map */
    assert(MBI_OM_iterator != NULL);
    assert(MBI_OM_iterator->type == OM_TYPE_ITERATOR);
    iter = (MBIt_Iterator *)MBI_objmap_pop(MBI_OM_iterator, (OM_key_t)*itr_ptr);
    if (iter == NULL) 
    {
        P_FUNCFAIL("Invalid iterator handle (%d)", (int)*itr_ptr);
        return MB_ERR_INVALID;
    }
    
    assert(iter != NULL);
    assert(iter->data != NULL);
    
    /* free memory used by pooled list */
    rc = pl_delete(&(iter->data));
    assert(rc == PL_SUCCESS);
    
    /* deallocate iterator object */
    free(iter);
    
    if (rc != PL_SUCCESS) 
    {
        P_FUNCFAIL("pl_delete() returned with err code %d", rc);
        return MB_ERR_INTERNAL;
    }
    
    P_INFO("Deleted iterator (%d)", (int)*itr_ptr);
    *itr_ptr = MB_NULL_ITERATOR;
    
    return MB_SUCCESS;
}
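
For context, here is a hedged usage sketch for MB_Iterator_Delete(). It assumes the usual libmboard entry points (MB_Env_Init, MB_Create, MB_AddMessage, MB_Iterator_Create, MB_Iterator_GetMessage, MB_Delete, MB_Env_Finalise) with the signatures their names suggest, and it ignores most return codes for brevity; check mboard.h for the exact API before relying on it.

#include <stdio.h>
#include <stdlib.h>
#include "mboard.h"

struct my_msg { int id; double value; };

int main(void)
{
    MBt_Board    board;
    MBt_Iterator iter;
    struct my_msg m = { 42, 3.14 };
    void *ptr;

    MB_Env_Init();
    MB_Create(&board, sizeof(struct my_msg));
    MB_AddMessage(board, &m);

    MB_Iterator_Create(board, &iter);
    while (MB_Iterator_GetMessage(iter, &ptr) == MB_SUCCESS && ptr != NULL) {
        printf("id = %d\n", ((struct my_msg *)ptr)->id);
        free(ptr);   /* assumed: GetMessage hands back a copy the caller frees */
    }

    /* deleting the iterator releases its pooled list, as described above;
     * deleting a null iterator (MB_NULL_ITERATOR) is also valid */
    MB_Iterator_Delete(&iter);

    MB_Delete(&board);
    MB_Env_Finalise();
    return 0;
}
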
Example #9
// The 'self' parameter is for keeping the current Group object alive while this thread is running.
void
Group::finalizeRestart(GroupPtr self, Options options, RestartMethod method,
	SpawnerFactoryPtr spawnerFactory, unsigned int restartsInitiated,
	vector<Callback> postLockActions)
{
	TRACE_POINT();

	Pool::runAllActions(postLockActions);
	postLockActions.clear();

	this_thread::disable_interruption di;
	this_thread::disable_syscall_interruption dsi;

	// Create a new spawner.
	SpawnerPtr newSpawner = spawnerFactory->create(options);
	SpawnerPtr oldSpawner;

	UPDATE_TRACE_POINT();
	PoolPtr pool = getPool();

	Pool::DebugSupportPtr debug = pool->debugSupport;
	if (debug != NULL && debug->restarting) {
		this_thread::restore_interruption ri(di);
		this_thread::restore_syscall_interruption rsi(dsi);
		this_thread::interruption_point();
		debug->debugger->send("About to end restarting");
		debug->messages->recv("Finish restarting");
	}

	ScopedLock l(pool->syncher);
	if (!isAlive()) {
		P_DEBUG("Group " << name << " is shutting down, so aborting restart");
		return;
	}
	if (restartsInitiated != this->restartsInitiated) {
		// Before this restart could be finalized, another restart command was given.
		// The spawner we just created might be out of date now so we abort.
		P_DEBUG("Restart of group " << name << " aborted because a new restart was initiated concurrently");
		if (debug != NULL && debug->restarting) {
			debug->debugger->send("Restarting aborted");
		}
		return;
	}

	// Run some sanity checks.
	pool->fullVerifyInvariants();
	assert(m_restarting);
	UPDATE_TRACE_POINT();
	
	// Atomically swap the new spawner with the old one.
	resetOptions(options);
	oldSpawner = spawner;
	spawner    = newSpawner;

	m_restarting = false;
	if (shouldSpawn()) {
		spawn();
	} else if (isWaitingForCapacity()) {
		P_INFO("Group " << name << " is waiting for capacity to become available. "
			"Trying to shutdown another idle process to free capacity...");
		if (pool->forceFreeCapacity(this, postLockActions) != NULL) {
			spawn();
		} else {
			P_INFO("There are no processes right now that are eligible "
				"for shutdown. Will try again later.");
		}
	}
	verifyInvariants();

	l.unlock();
	oldSpawner.reset();
	Pool::runAllActions(postLockActions);
	P_DEBUG("Restart of group " << name << " done");
	if (debug != NULL && debug->restarting) {
		debug->debugger->send("Restarting done");
	}
}
Example #10
// The 'self' parameter is for keeping the current Group object alive while this thread is running.
void
Group::spawnThreadOOBWRequest(GroupPtr self, ProcessPtr process) {
	TRACE_POINT();
	this_thread::disable_interruption di;
	this_thread::disable_syscall_interruption dsi;

	Socket *socket;
	Connection connection;
	PoolPtr pool = getPool();
	Pool::DebugSupportPtr debug = pool->debugSupport;

	UPDATE_TRACE_POINT();
	P_DEBUG("Performing OOBW request for process " << process->inspect());
	if (debug != NULL && debug->oobw) {
		debug->debugger->send("OOBW request about to start");
		debug->messages->recv("Proceed with OOBW request");
	}
	
	UPDATE_TRACE_POINT();
	{
		// Standard resource management boilerplate stuff...
		boost::unique_lock<boost::mutex> lock(pool->syncher);
		if (OXT_UNLIKELY(!process->isAlive()
			|| process->enabled == Process::DETACHED
			|| !isAlive()))
		{
			return;
		}

		if (process->enabled != Process::DISABLED) {
			UPDATE_TRACE_POINT();
			P_INFO("Out-of-Band Work canceled: process " << process->inspect() <<
				" was concurrently re-enabled.");
			if (debug != NULL && debug->oobw) {
				debug->debugger->send("OOBW request canceled");
			}
			return;
		}
		
		assert(process->oobwStatus == Process::OOBW_IN_PROGRESS);
		assert(process->sessions == 0);
		socket = process->sessionSockets.top();
		assert(socket != NULL);
	}
	
	UPDATE_TRACE_POINT();
	unsigned long long timeout = 1000 * 1000 * 60; // 1 min
	try {
		this_thread::restore_interruption ri(di);
		this_thread::restore_syscall_interruption rsi(dsi);

		// Grab a connection. The connection is marked as fail in order to
		// ensure it is closed / recycled after this request (otherwise we'd
		// need to completely read the response).
		connection = socket->checkoutConnection();
		connection.fail = true;
		ScopeGuard guard(boost::bind(&Socket::checkinConnection, socket, connection));
		
		// This is copied from RequestHandler when it is sending data using the
		// "session" protocol.
		char sizeField[sizeof(uint32_t)];
		SmallVector<StaticString, 10> data;

		data.push_back(StaticString(sizeField, sizeof(uint32_t)));
		data.push_back(makeStaticStringWithNull("REQUEST_METHOD"));
		data.push_back(makeStaticStringWithNull("OOBW"));

		data.push_back(makeStaticStringWithNull("PASSENGER_CONNECT_PASSWORD"));
		data.push_back(makeStaticStringWithNull(process->connectPassword));

		uint32_t dataSize = 0;
		for (unsigned int i = 1; i < data.size(); i++) {
			dataSize += (uint32_t) data[i].size();
		}
		Uint32Message::generate(sizeField, dataSize);

		gatheredWrite(connection.fd, &data[0], data.size(), &timeout);

		// We do not care what the actual response is ... just wait for it.
		UPDATE_TRACE_POINT();
		waitUntilReadable(connection.fd, &timeout);
	} catch (const SystemException &e) {
		P_ERROR("*** ERROR: " << e.what() << "\n" << e.backtrace());
	} catch (const TimeoutException &e) {
		P_ERROR("*** ERROR: " << e.what() << "\n" << e.backtrace());
	}
	
	UPDATE_TRACE_POINT();
	vector<Callback> actions;
	{
		// Standard resource management boilerplate stuff...
		PoolPtr pool = getPool();
		boost::unique_lock<boost::mutex> lock(pool->syncher);
		if (OXT_UNLIKELY(!process->isAlive() || !isAlive())) {
			return;
		}
		
		process->oobwStatus = Process::OOBW_NOT_ACTIVE;
		if (process->enabled == Process::DISABLED) {
			enable(process, actions);
			assignSessionsToGetWaiters(actions);
		}

		pool->fullVerifyInvariants();

		initiateNextOobwRequest();
	}
	UPDATE_TRACE_POINT();
	runAllActions(actions);
	actions.clear();

	UPDATE_TRACE_POINT();
	P_DEBUG("Finished OOBW request for process " << process->inspect());
	if (debug != NULL && debug->oobw) {
		debug->debugger->send("OOBW request finished");
	}
}
Example #11
/*!
 * \brief Creates a filtered and sorted Iterator for accessing a selection of 
 *        messages in the MessageBoard
 * \ingroup MB_API
 * \param[in] mb MessageBoard handle
 * \param[out] itr_ptr Address of Iterator Handle
 * \param[in] filterFunc Pointer to user-defined filter function
 * \param[in] filterFuncParams Pointer to input data that will be passed into \c filterFunc 
 * \param[in] cmpFunc Pointer to user-defined comparison function
 * 
 * 
 * The Iterator object is allocated and populated with messages from an array 
 * returned by the \c get_sorted_filtered_ptr_list() (static function defined 
 * in iterator_createfilteredsorted.c). The Iterator is then registered 
 * with the ::MBI_OM_iterator and the reference ID returned by ObjectMap 
 * is then written to \c itr_ptr as the handle.
 * 
 * \c get_sorted_filtered_ptr_list() populates a message array by traversing 
 * the MessageBoard and selecting messages that are accepted by \c filterFunc.
 * It then uses \c cmpFunc with \c qsort() (from \c stdlib.h) for sorting before 
 * returning a pointer to the message array. 
 * 
 * The use of \c qsort() got a little 
 * messy as we need to sort the list based on the \em value of the messages whereas
 * the array stores only \em pointers to those messages. For now, \c qsort() is given 
 * \c ptrSort() -- a static function which dereferences the message pointers before
 * calling \c cmpFunc. The messy part is that \c cmpFunc cannot be passed to 
 * \c ptrSort() as an argument (for compatibility with \c qsort()) so it has to be
 * handed over using a global variable (\c funcPtr). This global variable makes
 * our routine not thread-safe.
 * 
 *  
 * We expect \c filterFunc() to return a \c 0 if a message is to be rejected,
 * and a non-zero \c int if it is to be accepted.
 * 
 * We expect \c cmpFunc() to return an integer 
 * less than, equal to, or greater than zero if the first message is 
 * considered to be respectively less than, equal to, or greater than the 
 * second. In short:
 *  - <tt>0 if (msg1 == msg2)</tt>
 *  - <tt>\< 0 if (msg1 \< msg2)</tt>
 *  - <tt>\> 0 if (msg1 \> msg2)</tt>
 * 
 * References to messages are stored within a pooled_list and traversed as 
 * a linked list. The memory block size for the Iterator pooled_list is chosen to
 * be half that of the default MessageBoard block size.
 * 
 * Message references are stored in the same order they appear in the board.
 * 
 * \note We only store pointers to message objects within the Iterator, and
 *       not the actual messages. These pointers will be invalid if the 
 * MessageBoard is deleted or modified. Checking the validity of 
 * messages each time they are accessed would be too great an overhead.
 * 
 * Possible return codes:
 *  - ::MB_SUCCESS 
 *  - ::MB_ERR_INVALID (invalid or null board given) 
 *  - ::MB_ERR_MEMALLOC (error allocating memory for Iterator object or pooled_list)
 *  - ::MB_ERR_LOCKED (\c mb is locked)
 *  - ::MB_ERR_INTERNAL (possible bug. Recompile and run in debug mode for hints)
 *  - ::MB_ERR_OVERFLOW (MessageBoard overflow. Too many Iterators created.)
 */
int MB_Iterator_CreateFilteredSorted(MBt_Board mb, MBt_Iterator *itr_ptr, \
        int (*filterFunc)(const void *msg, const void *params), \
        void *filterFuncParams, \
        int (*cmpFunc)(const void *msg1, const void *msg2) ) {
    
    int rc, mcount, i, elemOut = 0;
    OM_key_t rc_om;
    void **ptr_array = NULL;
    void *new;
    MBIt_Board *board;
    MBIt_Iterator *iter;
    
    /* Check for NULL message board */
    if (mb == MB_NULL_MBOARD) 
    {
        P_FUNCFAIL("Cannot create iterator for null board (MB_NULL_MBOARD)");
        return MB_ERR_INVALID;
    }
    
    /* get ptr to board */
    board = (MBIt_Board*)MBI_getMBoardRef(mb);
    if (board == NULL) 
    {
        P_FUNCFAIL("Invalid board handle (%d)", (int)mb);
        return MB_ERR_INVALID;
    }
    mcount = (int)board->data->count_current;
    
    /* check if board is locked */
    if (board->locked == MB_TRUE) 
    {
        P_FUNCFAIL("Board (%d) is locked", (int)mb);
        return MB_ERR_LOCKED;
    }
    
    /* check if board is "unreadable" */
    if (board->is_reader == MB_FALSE)
    {
        P_FUNCFAIL("Board access mode was set to non-readable");
        return MB_ERR_DISABLED;
    }
    
    /* Allocate Iterator object */
    iter = (MBIt_Iterator*)malloc(sizeof(MBIt_Iterator));
    assert(iter != NULL);
    if (iter == NULL) 
    {
        P_FUNCFAIL("Could not allocate required memory");
        return MB_ERR_MEMALLOC;
    }
    
    /* assign mb handle to iterator */
    iter->mb        = mb;
    iter->msgsize   = board->data->elem_size;
    iter->cursor    = NULL;
    iter->iterating = 0;
    
    /* allocate memory for address list */
    rc = pl_create(&(iter->data), sizeof(void *), 
                    (int)(MBI_CONFIG.mempool_blocksize / 2));
    if (rc != PL_SUCCESS)
    {
        free(iter);
        if (rc == PL_ERR_MALLOC) 
        {
            P_FUNCFAIL("Could not allocate required memory");
            return MB_ERR_MEMALLOC;
        }
        else 
        {
            P_FUNCFAIL("pl_create() returned with err code %d", rc);
            return MB_ERR_INTERNAL;
        }
    }
    
    /* generate sorted message address array */
    if (mcount > 0)
    {
        ptr_array = get_sorted_filtered_ptr_list(mb, mcount, cmpFunc, \
                filterFunc, filterFuncParams, &elemOut);
        assert(ptr_array != NULL);
    }
    
    /* populate iterator */
    for (i = 0; i < elemOut; i++)
    {
        rc = pl_newnode(iter->data, &new);
        assert(rc == PL_SUCCESS);
        memcpy(new, &ptr_array[i], sizeof(void*));

    }
    
    if (ptr_array != NULL) free(ptr_array);
    
    /* register iter object */
    rc_om = MBI_objmap_push(MBI_OM_iterator, (void*)iter);
    if (rc_om > OM_MAX_INDEX)
    {
        if (rc_om == OM_ERR_MEMALLOC)
        {
            P_FUNCFAIL("Could not allocate required memory");
            return MB_ERR_MEMALLOC;
        }
        else if (rc_om == OM_ERR_OVERFLOW)
        {
            P_FUNCFAIL("Too many iterators created. Objmap key overflow");
            return MB_ERR_OVERFLOW;
        }
        else
        {
            P_FUNCFAIL("MBI_objmap_push() returned with err code %d", rc);
            return MB_ERR_INTERNAL;
        }
    }
    
    /* assign return pointer */
    *itr_ptr  = (MBt_Iterator)rc_om;
    
    P_INFO("Iterator created (iter:%d, board:%d, mcount:%d) - FILTERED+SORTED", 
            (int)rc_om, (int)mb, (int)iter->data->count_current);
    
    return MB_SUCCESS;
}
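
The documentation above fixes the contracts of the two callbacks: the filter returns 0 to reject a message and non-zero to accept it, and the comparison function returns a value less than, equal to or greater than zero. The sketch below shows what such callbacks might look like; the message struct, the threshold parameter and build_view() are made up for illustration.

#include "mboard.h"

struct my_msg { int id; double value; };

/* accept only messages whose value exceeds *(double *)params */
static int my_filter(const void *msg, const void *params)
{
    const struct my_msg *m = (const struct my_msg *)msg;
    return (m->value > *(const double *)params) ? 1 : 0;
}

/* order messages by ascending id: <0, 0 or >0 as documented */
static int my_cmp(const void *msg1, const void *msg2)
{
    const struct my_msg *a = (const struct my_msg *)msg1;
    const struct my_msg *b = (const struct my_msg *)msg2;
    return (a->id > b->id) - (a->id < b->id);
}

/* typical call site (board assumed to be a populated MBt_Board) */
int build_view(MBt_Board board, MBt_Iterator *iter)
{
    double threshold = 0.5;
    return MB_Iterator_CreateFilteredSorted(board, iter,
                                            &my_filter, &threshold, &my_cmp);
}
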
Example #12
/*! 
 * \brief Tag messages in the board
 * \param[in] node address of CommQueue node
 * \return Return Code
 * 
 * This routine is to be executed during the first communication stage
 * (::MB_COMM_OLD_PRE_TAGGING).
 * 
 * Pre:
 * - node->mb is valid
 * - board->locked == ::MB_TRUE
 * - board->syncCompleted == ::MB_FALSE
 * - node->stage == ::MB_COMM_OLD_PRE_TAGGING
 * - node->flag_fdrFallback == ::MB_FALSE
 * - node->flag_shareOutbuf == ::MB_FALSE
 * - node->incount == \c NULL
 * - node->outcount == \c NULL
 * - node->inbuf == \c NULL
 * - node->outbuf == \c NULL
 * - node->sendreq == \c NULL
 * - node->recvreq == \c NULL
 * - node->board == \c NULL
 * 
 * Steps:
 * -# Get pointer to board object and cache it in node->board
 * -# Allocate memory for node->outcount
 * -# If node->board->filter == \c NULL or node->board->data->count_current == 0
 *  -# set node->outcount[*] = node->board->data->count_current
 *  -# set node->flag_shareOutbuf = ::MB_TRUE
 * -# If node->board->filter != \c NULL
 *  -# Use node->board->filter to build tag table in node->board->tt
 *  -# Allocate memory for node->outcount
 *  -# Initialise values in node->outcount[] based on contents of node->board->tt.
 *     Keep count of outcount total as we go along. If total > node->board->data->count_current, 
 *     fall back to full data replication
 *   - clear tag table
 *   - set node->outcount[*] = node->board->data->count_current
 *   - set node->flag_fdrFallback = ::MB_TRUE
 *   - set node->flag_shareOutbuf = ::MB_TRUE
 * -# set node->stage to ::MB_COMM_OLD_READY_FOR_PROP
 * 
 * Post:
 * - node->stage == ::MB_COMM_OLD_READY_FOR_PROP
 * - node->outcount != \c NULL
 * - node->board != \c NULL
 * - if (node->board->filter != NULL)
 *  - if (node->flag_fdrFallback == ::MB_TRUE) node->board->tt == \c NULL
 *  - if (node->flag_fdrFallback == ::MB_FALSE) node->board->tt != \c NULL
 * - if node->board->filter == \c NULL or node->flag_fdrFallback == ::MB_TRUE
 *  - node->flag_shareOutbuf == ::MB_TRUE
 */
int MBI_CommRoutine_OLD_TagMessages(struct MBIt_commqueue *node) {
    
    char window;
    int rc, i, j, c, w, p;
    int total_tagged, mcount;
    void *msg;
    MBIt_TagTable *tt;
    pl_address_node *pl_itr;
    
    /* check that initial values are set properly */
    assert(node->stage == MB_COMM_OLD_PRE_TAGGING);
    assert(node->flag_fdrFallback == MB_FALSE);
    assert(node->flag_shareOutbuf == MB_FALSE);
    assert(node->incount == NULL);
    assert(node->outcount == NULL);
    assert(node->inbuf == NULL);
    assert(node->outbuf == NULL);
    assert(node->recvreq == NULL);
    assert(node->sendreq == NULL);
    assert(node->board == NULL);

    /* get reference to board object and cache ptr in node */
    node->board = (MBIt_Board *)MBI_getMBoardRef(node->mb);
    assert(node->board != NULL);
    if (node->board == NULL) return MB_ERR_INVALID;
    
    P_INFO("COMM: Preparing (Board %d) for sync process", node->mb);
    
    /* check board state */
    assert(node->board->locked == MB_TRUE);
    assert(node->board->syncCompleted == MB_FALSE);
    
    /* get message count */
    /* ignore messages that have already been synced */
    mcount = (int)node->board->data->count_current - 
             (int)node->board->synced_cursor;
  
    /* allocate memory for outcount */
    node->outcount = (int *)calloc((size_t)MBI_CommSize, sizeof(int)); 
    assert(node->outcount != NULL);
    if (node->outcount == NULL) return MB_ERR_MEMALLOC;
    
    /* determine the number of messages to send to remote procs */
    if (mcount == 0 || MBI_CommSize == 1) /* nothing to send */
    {
        /* outcount already initialised to 0 (calloc) */
        /*for (i = 0; i < MBI_CommSize; i++) node->outcount[i] = 0;*/
    }
    else if (node->board->filter == (MBIt_filterfunc)NULL) /* no filter */
    {   
        /* send all messages to all procs (except self) */
        for (i = 0; i < MBI_CommSize; i++)
        {
            node->outcount[i] = (i == MBI_CommRank) ? 0 : mcount;
        }
        
        /* outgoing buffer can be shared */
        node->flag_shareOutbuf = MB_TRUE;
        
    }
    else /* filter assigned */
    {        
        P_INFO("COMM: (Board %d) is filtered. Tagging messages", (int)node->mb);
        /* create tag_table and assign to board */
        rc = tt_create(&tt, mcount, MBI_CommSize);
        assert(rc == TT_SUCCESS);
        if (rc != TT_SUCCESS)
        {
            if (rc == TT_ERR_MEMALLOC) return MB_ERR_MEMALLOC;
            else return MB_ERR_INTERNAL;
        }
        node->board->tt = tt; /* assign to board */
        
        /* initialise counters */
        i = j = 0;
        total_tagged = 0;
        
        /* loop thru messages and fill up tag table */
        for (pl_itr = PL_ITERATOR(node->board->data); pl_itr; pl_itr = pl_itr->next)
        {
            assert(i < (int)node->board->data->count_current);
            
            /* skip messages that have already been synced */
            if (i < (int)node->board->synced_cursor) 
            {
                i++;
                continue;
            }
            
            /* get reference to message from iterator */
            msg = PL_NODEDATA(pl_itr);
            assert(msg != NULL);
            if (msg == NULL) return MB_ERR_INTERNAL;
            
            /* c : offset within byte buffer (window)
             * w : window offset within table row
             */
            c = w = 0;
            SETZEROS(window);
            
            /* run filter on message per MPI task */
            for (p = 0; p < MBI_CommSize; p++)
            {
                if (p != MBI_CommRank)
                {   
                    /* if message accepted by filter */
                    if (1 == (*node->board->filter)(msg, p))
                    {
                        /* set bit within our byte buffer */
                        SETBIT(window, c);
                        
                        /* update outcount */
                        node->outcount[p]++;
                        total_tagged++;

                    }
                }
                
                
                /* move index within window */
                c++;
                
                /* when window full, write to table and shift window */
                if (c == 8)
                {
                    /* write byte buffer to table */
                    rc = tt_setbyte(node->board->tt, j, w, window);
                    assert(rc == TT_SUCCESS);
                    
                    /* move window */
                    w += 1;
                    
                    /* reset byte buffer */
                    SETZEROS(window);
                    c = 0;
                }
            }
            
            /* write remaining byte buffer */
            if (w < (int)node->board->tt->row_size)
            {
                rc = tt_setbyte(node->board->tt, j, w, window);
                assert(rc == TT_SUCCESS);
            }
            
            /* increment counter */
            i++;
            j++;
        }
        assert(node->outcount[MBI_CommRank] == 0);
        
        /* Should we fall back to full data replication? */
        if (total_tagged > mcount)
        {
            P_INFO("COMM: (Board %d) Tagged messages <%d> exceeds message count <%d>. "
                    "Delegating filtering to recipient", 
                    (int)node->mb, total_tagged, mcount);
            /* we don't need the tagtable any more */
            node->board->tt = NULL;
            rc = tt_delete(&tt);
            assert(rc == TT_SUCCESS);
            
            /* send all messages to all remote procs */
            node->flag_fdrFallback = MB_TRUE; /* fallback to full data replication */
            node->flag_shareOutbuf = MB_TRUE; /* use shared buffer */
            for (i = 0; i < MBI_CommSize; i++)
            {
                if (node->outcount[i] != 0) node->outcount[i] = mcount;
            }
        }
    }
    
    /* move on to next stage */
    P_INFO("COMM: (Board %d) moving to MB_COMM_OLD_READY_FOR_PROP stage", node->mb);
    node->stage = MB_COMM_OLD_READY_FOR_PROP;
    return MB_SUCCESS;
    
}
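
The tagging loop above packs one accept/reject bit per remote process into 8-bit "windows" that are flushed into a tag-table row as they fill up. The sketch below reproduces just that windowing logic against a plain byte array, assuming (as the decoding loop in Example #14 suggests) most-significant-bit-first ordering; SETBIT and tt_setbyte() are not shown in this listing, so the macro and the row buffer here are stand-ins.

#include <stdio.h>
#include <string.h>

/* assumed MSB-first bit setter, mirroring the decode direction in Example #14 */
#define SET_BIT(byte, pos) ((byte) |= (unsigned char)(0x80u >> (pos)))

static void pack_row(const int *accepted, int nprocs,
                     unsigned char *row, int row_size)
{
    int p, c = 0, w = 0;
    unsigned char window = 0;

    memset(row, 0, (size_t)row_size);
    for (p = 0; p < nprocs; p++) {
        if (accepted[p])
            SET_BIT(window, c);        /* tag message for process p */
        if (++c == 8) {                /* window full: flush and shift */
            row[w++] = window;
            window = 0;
            c = 0;
        }
    }
    if (c != 0 && w < row_size)        /* write the partially filled window */
        row[w] = window;
}

int main(void)
{
    int accepted[10] = { 1, 0, 1, 1, 0, 0, 0, 1, 1, 0 };
    unsigned char row[2];

    pack_row(accepted, 10, row, 2);
    printf("row bytes: 0x%02x 0x%02x\n", row[0], row[1]);  /* 0xb1 0x80 */
    return 0;
}
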
Example #13
/*! 
 * \brief Complete the message propagation process
 * \param[in] node address of CommQueue node
 * \return Return Code
 *
 * This routine is to be executed during the ::MB_COMM_OLD_PROPAGATION stage.
 * 
 * Steps:
 * -# if node->pending_in != 0, check receives using MPI_Testany().
 *    For each completed comm:
 *  -# Decrement node->pending_in
 *  -# Check buffer header if delayed_filtering is set
 *   - If set, run each message in buffer through node->board->filter
 *     before adding to local board
 *   - If not set, add all messages in buffer to local board
 *  -# Free node->inbuf[i]
 * 
 * -# if node->pending_out != 0, check sends using MPI_Testany().
 *    For each completed comm:
 *  -# Decrement node->pending_out
 *  -# if node->flag_shareOutbuf == ::MB_FALSE, free node->outbuf[i]
 *  -# if node->flag_shareOutbuf == ::MB_TRUE, free node->outbuf[0] if
 *     node->pending_out == 0
 *
 * -# Check if comms completed?
 *  - if node->pending_in == 0 and node->pending_out == 0
 *   -# free node->incount
 *   -# free node->inbuf
 *   -# free node->outbuf
 *   -# free node->sendreq
 *   -# free node->recvreq
 *   -# Capture node->board->syncLock
 *   -# set node->board->syncCompleted = ::MB_TRUE
 *   -# Release node->board->syncLock
 *   -# Signal node->board->syncCond
 *   -# set node->stage = ::MB_COMM_END
 *   -# return ::MB_SUCCESS_2
 *  - else
 *   -# return ::MB_SUCCESS
 * 
 * 
 * 
 * Post:
 * - if node->pending_in == 0 and node->pending_out == 0
 *  - node->incount == \c NULL
 *  - node->inbuf == \c NULL
 *  - node->outbuf == \c NULL
 *  - node->sendreq == \c NULL
 *  - node->recvreq == \c NULL
 *  - return code == MB_SUCCESS_2
 *  - node->board->syncCompleted == ::MB_TRUE
 *  - node->stage == ::MB_COMM_END
 */ 
int MBI_CommRoutine_OLD_CompletePropagation(struct MBIt_commqueue *node) {
    
    int i, m, p, rc;
    int completed;
    int filter_required;
    void *ptr_new, *msg;
    char *header_byte, *bufptr;

    assert(node->stage == MB_COMM_OLD_PROPAGATION);
    assert(node->outcount == NULL);
    assert(node->incount  != NULL);
    assert(node->sendreq  != NULL);
    assert(node->recvreq  != NULL);
    assert(node->inbuf    != NULL);
    assert(node->outbuf   != NULL);
    assert(node->board    != NULL);
    
    /* ---------- check for completed sends -------------- */
    if (node->pending_out > 0)
    {
        /* check if any of the sends completed */
        rc = MPI_Testsome(MBI_CommSize, node->sendreq, &completed,
                MBI_comm_indices, MPI_STATUSES_IGNORE);
        assert(rc == MPI_SUCCESS);
        if (rc != MPI_SUCCESS) return MB_ERR_MPI;
        
        if (completed > 0)
        {
            /* decrement counter */
            node->pending_out -= completed;

            if (node->flag_shareOutbuf == MB_FALSE)
            {
                assert(node->flag_fdrFallback == MB_FALSE);
                
                /* free buffer of completed sends */
                for (p = 0; p < completed; p++) 
                {
                    i = MBI_comm_indices[p];
                    assert(i != MBI_CommRank);
                    assert(node->outbuf[i] != NULL);
                    assert(node->sendreq[i] == MPI_REQUEST_NULL);
                    
                    free(node->outbuf[i]);
                    node->outbuf[i] = NULL;
                    
                    P_INFO("COMM: (Board %d) send to P%d completed", (int)node->mb, i);
                }
            }
            else if (node->pending_out == 0) /* outbuf shared */
            {
                assert(node->outbuf[0] != NULL);
                assert(node->flag_fdrFallback == MB_TRUE || 
                        node->board->filter == (MBIt_filterfunc)NULL);
                
                /* free shared buffer */
                free(node->outbuf[0]);
                node->outbuf[0] = NULL;
                
                P_INFO("COMM: (Board %d) all sends completed", (int)node->mb);
            }
        }
    }
    
    /* ---------- check for completed receives -------------- */
    if (node->pending_in > 0)
    {
        /* check if any of the receives completed */
        rc = MPI_Testsome(MBI_CommSize, node->recvreq, &completed,
                MBI_comm_indices, MPI_STATUSES_IGNORE);
        assert(rc == MPI_SUCCESS);
        if (rc != MPI_SUCCESS) return MB_ERR_MPI;
        
        if (completed > 0)
        {
            /* decrement counter */
            node->pending_in -= completed;
            
            /* for each completed receive, load messages and clear buffer */
            for (p = 0; p < completed; p++)
            {
                /* which receive completed? */
                i = MBI_comm_indices[p];
                
                assert(node->inbuf[i] != NULL);
                assert(node->recvreq[i] == MPI_REQUEST_NULL); 
                
                /* get reference to header byte */
                header_byte = (char*)(node->inbuf[i]);
                
                /* get flag indicating if filter should be run */
                filter_required = BIT_IS_SET(*header_byte, MBI_COMM_HEADERBYTE_FDR);                 
                
                P_INFO("COMM: (Board %d) receive from P%d completed", (int)node->mb, i);
                #ifdef _EXTRA_INFO
                if (filter_required)
                {
                    P_INFO("COMM: (Board %d) performing delayed filtering on messages", 
                            (int)node->mb);
                }
                #endif
                
                /* location of message buffer is after header (of size 1 byte) */
                bufptr = (char*)(node->inbuf[i]) + 1;
                
                /* for each received message */
                for (m = 0; m < node->incount[i]; m++)
                {
                    /* get pointer to message in buffer */
                    msg = (void*)(bufptr + (node->board->data->elem_size * m));
                    
                    /* do we need to run msg thru filter before storing? */
                    if (filter_required)
                    {
                        assert(node->board->filter != (MBIt_filterfunc)NULL);
                        if (0 == (*node->board->filter)(msg, MBI_CommRank))
                           continue; /* we don't want this message */
                    }
                    
                    /* add new node to local board */
                    rc = pl_newnode(node->board->data, &ptr_new);
                    assert(rc == PL_SUCCESS);
                    /* copy message into node */
                    memcpy(ptr_new, msg, node->board->data->elem_size);

                }
                
                /* we can now free the buffer */
                free(node->inbuf[i]);
                node->inbuf[i] = NULL;

            }
        }
        
        #ifdef _EXTRA_INFO
        if (node->pending_in == 0)
        {
            P_INFO("COMM: (Board %d) all receives completed", (int)node->mb);
        }
        #endif
    }
    
    /* ------------ if all comms completed, clean up and end ------------ */
    
    if (node->pending_in == 0 && node->pending_out == 0)
    {
        /* free up memory */
        free(node->incount);  node->incount = NULL;
        free(node->inbuf);    node->inbuf = NULL;
        free(node->outbuf);   node->outbuf = NULL;
        free(node->sendreq);  node->sendreq = NULL;
        free(node->recvreq);  node->recvreq = NULL;
        
        /* move cursor */
        node->board->synced_cursor = node->board->data->count_current;
        
        /* mark sync as completed */
        node->board->syncCompleted = MB_TRUE;
        
        /* move to end state and indicate that we're done */
        P_INFO("COMM: (Board %d) sync process completed", node->mb);
        node->stage = MB_COMM_END;
        return MB_SUCCESS_2; /* node can be removed from queue */
    }
    else
    {
        /* there are still pending comms. No state change  */
        return MB_SUCCESS;
    }
}
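
Each receive buffer handled above consists of a single header byte, whose FDR bit requests delayed (recipient-side) filtering, followed by incount[i] fixed-size messages. The sketch below walks that layout with a stand-in filter and a printf in place of the board insertion; the header-bit position and the message struct are assumptions for illustration.

#include <stdio.h>
#include <string.h>

#define HDR_FDR 0x01   /* assumed position of the delayed-filtering flag */

struct my_msg { int id; double value; };

static int my_filter(const void *msg, int rank)
{
    (void)rank;
    return ((const struct my_msg *)msg)->id % 2 == 0;   /* keep even ids */
}

static void unpack_buffer(const char *buf, int count, size_t elem_size, int rank)
{
    const char *payload = buf + 1;                       /* messages follow the header */
    int filter_required = (buf[0] & HDR_FDR) != 0;       /* delayed filtering? */
    int m;

    for (m = 0; m < count; m++) {
        struct my_msg tmp;
        memcpy(&tmp, payload + m * elem_size, sizeof(tmp));  /* buffer may be unaligned */
        if (filter_required && !my_filter(&tmp, rank))
            continue;                                    /* recipient-side filtering */
        printf("storing message id %d\n", tmp.id);       /* stand-in for pl_newnode+memcpy */
    }
}

int main(void)
{
    struct my_msg msgs[3] = { {1, 0.1}, {2, 0.2}, {4, 0.4} };
    char buf[1 + sizeof(msgs)];

    buf[0] = HDR_FDR;                  /* pretend the sender fell back to FDR */
    memcpy(buf + 1, msgs, sizeof(msgs));
    unpack_buffer(buf, 3, sizeof(struct my_msg), 0);
    return 0;
}
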
Example #14
/*! 
 * \brief Start propagation of messages
 * \param[in] node address of CommQueue node
 * \return Return Code
 *
 * This routine is to be executed during the ::MB_COMM_OLD_PRE_PROPAGATION stage.
 * 
 * Steps:
 * -# Allocate memory for node->inbuf (based on node->incount)
 * -# Issue MPI_Irecv() for each non-0 counts. node->pending_in++
 * -# Allocate memory for node->outbuf 
 * -# Set up non-blocking sends
 *  - If node->flag_shareOutbuf == ::MB_TRUE
 *   - Allocate memory for node->outbuf[0] + 1 byte for header
 *   - if node->board->filter != \c NULL Set delayed_filtering flag in header to ::MB_TRUE
 *   - if node->board->filter == \c NULL Set delayed_filtering flag in header to ::MB_FALSE
 *   - Issue MPI_Issend() to all remote procs. node->pending_out++
 *  - If node->flag_shareOutbuf == ::MB_FALSE
 *   - Ensure that node->board->filter != \c NULL and 
 *        node->flag_fdrFallback == ::MB_FALSE
 *   - For each remote node i, if node->outcount[i] != 0
 *    - Allocate memory for node->outbuf[i] + 1 byte for header
 *    - Set delayed_filtering flag in header to ::MB_FALSE
 *    - Copy tagged messages for proc i to buffer
 *    - delete node->board->tt
 *    - Issue MPI_Issend(). node->pending_out++
 * -# free node->outcount
 * -# Set node->stage == ::MB_COMM_OLD_PROPAGATION
 * 
 * Post:
 * -# node->stage == ::MB_COMM_OLD_PROPAGATION
 * -# node->outcount == \c NULL
 * -# node->outbuf != \c NULL
 * -# node->inbuf != \c NULL
 * -# node->board->tt == \c NULL
 * 
 */ 
int MBI_CommRoutine_OLD_InitPropagation(struct MBIt_commqueue *node) {
    
    int mcount;
    int w, b, p;
    int i, j, rc, tag, bufloc;
    void *msg;
    char *outptr, *row;
    char *header_byte;
    char **loc;
    char window;
    size_t msgsize;
    pl_address_node *pl_itr;
    
#ifdef _EXTRA_CHECKS
    int *msg_copied;
    msg_copied = (int*)calloc((size_t)MBI_CommSize, sizeof(int));
#endif
    
    assert(node->stage == MB_COMM_OLD_PRE_PROPAGATION);
    assert(node->outcount != NULL);
    assert(node->incount  != NULL);
    assert(node->sendreq  != NULL);
    assert(node->recvreq  != NULL);
    assert(node->pending_in  == 0);
    assert(node->pending_out == 0);
    assert(node->inbuf  ==  NULL);
    assert(node->outbuf ==  NULL);
    assert(node->board  != NULL);
    
    /* generate unique tag from this board */
    assert(node->mb <= MBI_TAG_BASE);
    tag = MBI_TAG_MSGDATA | node->mb;
    assert(tag < MBI_TAG_MAX);
    
    /* get message size and count */
    msgsize = node->board->data->elem_size;
    mcount  = (int)node->board->data->count_current - 
              (int)node->board->synced_cursor;
    
    /* Allocate memory for input buffers */
    node->inbuf = (void **)malloc(sizeof(void*) * MBI_CommSize);
    assert(node->inbuf != NULL);
    if (node->inbuf == NULL) return MB_ERR_MEMALLOC;
    
    /* Allocate memory for output buffers */
    node->outbuf = (void **)malloc(sizeof(void*) * MBI_CommSize);
    assert(node->outbuf != NULL);
    if (node->outbuf == NULL) return MB_ERR_MEMALLOC;
    

    /* ------- issue receives --------- */
    
    assert(node->incount[MBI_CommRank] == 0);
    for (i = 0; i < MBI_CommSize; i++)
    {
        if (node->incount[i] == 0)
        {
            /* no comms from this proc */
            node->inbuf[i]   = NULL;
            node->recvreq[i] = MPI_REQUEST_NULL;
            P_INFO("COMM: (Board %d) no data expected from P%d", (int)node->mb, i);
        }
        else
        {
            /* allocate memory for input buffer */
            node->inbuf[i] = (void*)malloc(1 + (msgsize * node->incount[i]));
            assert(node->inbuf[i] != NULL);
            if (node->inbuf[i] == NULL) return MB_ERR_MEMALLOC;
            
            /* issue non-blocking receive */
            rc = MPI_Irecv(node->inbuf[i], 1 + (int)(msgsize * node->incount[i]), 
                    MPI_BYTE, i, tag, MBI_CommWorld, &(node->recvreq[i]));
            assert(rc == MPI_SUCCESS);
            if (rc != MPI_SUCCESS) return MB_ERR_MPI;
            
            P_INFO("COMM: (Board %d) expecting %d messages from P%d", 
                    (int)node->mb, node->incount[i], i);
            
            /* increment counter */
            node->pending_in++;
        }
    }
    
    /* ----------- build output buffers ----------------- */
    
    for (i = 0; i < MBI_CommSize; i++) node->outbuf[i] = NULL;

    /* create output buffers and copy in messages */
    if (MBI_CommSize == 1 || mcount == 0)
    {
        /* nothing to do if only one proc or no messages */
    }
    else if (node->flag_shareOutbuf == MB_TRUE)
    {
        
        #ifdef _EXTRA_CHECKS
        /* if filter is assigned, buffer sharing only occurs during
         * fallback to full data replication 
         */
        
        if (node->board->filter != (MBIt_filterfunc)NULL && 
                node->board->data->count_current != 0)
        {
            assert(node->flag_fdrFallback == MB_TRUE);
            if (node->flag_fdrFallback != MB_TRUE) return MB_ERR_INTERNAL;
            
            for (i = 0; i< MBI_CommSize; i++) 
            {
                if (i == MBI_CommRank)
                {
                    assert(node->outcount[i] == 0);
                }
                else
                {
                    assert(node->outcount[i] == mcount || node->outcount[i] == 0);
                }
            }
        }
        #endif
        
        /* allocate shared buffer */
        node->outbuf[0] = (void*)malloc(1 + /* one byte for header info */
                                       (msgsize * mcount));
        assert(node->outbuf[0] != NULL);
        if (node->outbuf[0] == NULL) return MB_ERR_MEMALLOC;
        
        /* set header byte */
        header_byte = (char*)(node->outbuf[0]);
        *header_byte = ALLZEROS;
        if (node->flag_fdrFallback == MB_TRUE) /* set flag for FDR */
            *header_byte = *header_byte | MBI_COMM_HEADERBYTE_FDR;
        
        /* location of message buffer is one byte after header */
        outptr = (char*)(node->outbuf[0]) + 1;
        
        /* copy messages into output buffer */
        i = j = 0;
        for (pl_itr = PL_ITERATOR(node->board->data); pl_itr; pl_itr = pl_itr->next)
        {
            
            /* skip messages that have already been synced */
            if (i < (int)node->board->synced_cursor)
            {
                i++;
                continue;
            }
            
            /* get reference to message object */
            msg = PL_NODEDATA(pl_itr);
            assert(msg != NULL);
            
            /* copy into buffer */
            memcpy(outptr + (j*msgsize), msg, msgsize);
            
            /* increment counters */
            i++;
            j++;
        }
        assert(i == (int)node->board->data->count_current);
        assert(j == (int)node->board->data->count_current - (int)node->board->synced_cursor);
    }
    else /* messages are tagged */
    {
        assert(node->board->filter != (MBIt_filterfunc)NULL);
        assert(node->flag_fdrFallback == MB_FALSE);
        
        /* array of pointers to store next location in output buffer */
        loc = (char **)malloc(sizeof(char*) * MBI_CommSize);
        
        /* initialise output buffers */
        assert(node->outcount[MBI_CommRank] == 0);
        for (i = 0; i < MBI_CommSize; i++)
        {
            if (node->outcount[i] == 0) 
            {
                loc[i] = NULL;
            }
            else
            {
                /* allocate memory for output buffers */
                node->outbuf[i] = (void*)malloc(1 + (msgsize * node->outcount[i]));
                assert(node->outbuf[i] != NULL);
                if (node->outbuf[i] == NULL) return MB_ERR_MEMALLOC;
                
                /* set header byte */
                header_byte = (char*)(node->outbuf[i]);
                *header_byte = ALLZEROS;
                
                /* move loc to first message, after header */
                loc[i] = (char*)(node->outbuf[i]) + 1;
            }
        }
        
        /* copy in tagged messages */
        i = j = 0;
        for (pl_itr = PL_ITERATOR(node->board->data); pl_itr; pl_itr = pl_itr->next)
        {
            
            /* skip messages that have already been synced */
            if (i < (int)node->board->synced_cursor) 
            {
                i++;
                continue;
            }
            
            /* get reference to message object */
            msg = PL_NODEDATA(pl_itr);
            assert(msg != NULL);
            
            /* get ptr to row in tag table */
            rc = tt_getrow(node->board->tt, j, &row);
            assert(rc == TT_SUCCESS);
            assert(row != NULL);
            if (rc != TT_SUCCESS || row == NULL) return MB_ERR_INTERNAL;
            
            /* w: window index within row (in units of bytes)
             * b: bit index within window (in units of bits)
             * p: process (mpi task) represented by w&b
             */
            for (w = 0; w < (int)node->board->tt->row_size; w++)
            {
                window = *(row + w);
                
                b = 0;
                while (window != ALLZEROS)
                {
                    if (MSB_IS_SET(window))
                    {
                        /* determine which MPI task this refers to */
                        p = (w * 8) + b;
                        assert(p >= 0);
                        assert(p < MBI_CommSize);
                        assert(p != MBI_CommRank);
                        assert(node->outcount[p] != 0);
                        assert(loc[p] != NULL);
                        
                        #ifdef _EXTRA_CHECKS
                        /* keep track of messages copied into each buffer */
                        msg_copied[p] ++;
                        assert(msg_copied[p] <= node->outcount[p]);
                        #endif
                        
                        /* copy message to appropriate output buffer */
                        memcpy(loc[p], msg, msgsize);
                        
                        /* move to next free location in buffer */
                        loc[p] += msgsize;
                    }
                    
                    /* shift bit and repeat */
                    window = window << 1;
                    b++;
                }
            }
            
            /* on to next message */
            i++;
            j++;
        }
        assert(i == (int)node->board->data->count_current);
        assert(j == (int)node->board->data->count_current - (int)node->board->synced_cursor);
        free(loc);
        
        /* tag table no longer needed */
        rc = tt_delete(&(node->board->tt));
        assert(rc == TT_SUCCESS);
        assert(node->board->tt == NULL);
        
        #ifdef _EXTRA_CHECKS
        for (i = 0; i < MBI_CommSize; i++)
        {
            assert(msg_copied[i] == node->outcount[i]);
        }
        #endif
    }
    
    
    /* ----------- issue sends ----------------- */
    for (i = 0; i < MBI_CommSize; i++)
    {
        if (node->outcount[i] == 0)
        {
            node->sendreq[i] = MPI_REQUEST_NULL;
        }
        else
        {
            /* choose output buffer */
            bufloc = (node->flag_shareOutbuf == MB_TRUE) ? 0 : i;
            assert(node->outbuf[bufloc] != NULL);
            
            /* issue non-blocking send */
            rc = MPI_Issend(node->outbuf[bufloc], 
                    1 + (int)(node->outcount[i] * msgsize), 
                    MPI_BYTE, i, tag, MBI_CommWorld, &(node->sendreq[i]));
            assert(rc == MPI_SUCCESS);
            if (rc != MPI_SUCCESS) return MB_ERR_MPI;
            
            P_INFO("COMM: (Board %d) sending %d messages to P%d", 
                    (int)node->mb, node->outcount[i], i);
            
            /* increment counter */
            node->pending_out++;
        }

    }
    
    #ifdef _EXTRA_CHECKS
    free(msg_copied);
    #endif
    
    /* outcount no longer needed */
    free(node->outcount);
    node->outcount = NULL;
    
    /* move on to next stage */
    P_INFO("COMM: (Board %d) moving to MB_COMM_OLD_PROPAGATION stage", node->mb);
    node->stage = MB_COMM_OLD_PROPAGATION;
    return MB_SUCCESS;
}
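
When messages are tagged, the routine above decodes each tag-table row most-significant-bit first: a set bit at window w, bit b means the message goes to process p = w*8 + b. The sketch below isolates that decoding loop, using an assumed MSB_IS_SET macro and the row bytes produced by the packing sketch after Example #12.

#include <stdio.h>

#define MSB_IS_SET(byte) (((byte) & 0x80) != 0)

static void decode_row(const unsigned char *row, int row_size, int nprocs)
{
    int w, b, p;
    unsigned char window;

    for (w = 0; w < row_size; w++) {
        window = row[w];
        b = 0;
        while (window != 0) {                 /* stop once no bits remain */
            if (MSB_IS_SET(window)) {
                p = w * 8 + b;                /* process this bit refers to */
                if (p < nprocs)
                    printf("message tagged for process %d\n", p);
            }
            window = (unsigned char)(window << 1);
            b++;
        }
    }
}

int main(void)
{
    /* row produced by the packing sketch: bits 0, 2, 3, 7 and 8 set */
    unsigned char row[2] = { 0xB1, 0x80 };

    decode_row(row, 2, 10);
    return 0;
}
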
Example #15
/*! 
 * \brief Send out expected buffer sizes to all procs
 * \param[in] node address of CommQueue node
 * \return Return Code
 * 
 * This routine is to be executed during the ::MB_COMM_OLD_READY_FOR_PROP stage.
 * 
 * Steps:
 * -# Allocate memory for node->incount
 * -# Allocate memory for node->recvreq
 * -# Issue MPI_Irecv from all remote procs
 * -# Set node->pending_in = 1
 * -# Allocate memory for node->sendreq
 * -# Issue MPI_Issend to all remote procs
 * -# Set node->pending_out = 1
 * -# Set node->stage == ::MB_COMM_OLD_BUFINFO_SENT
 * 
 * Post:
 * - node->stage == ::MB_COMM_OLD_BUFINFO_SENT
 * - node->incount != \c NULL
 * - node->outcount != \c NULL
 * - node->sendreq != \c NULL
 * - node->recvreq != \c NULL
 * - node->pending_out == 1
 * - node->pending_in == 1
 * 
 */ 
int MBI_CommRoutine_OLD_SendBufInfo(struct MBIt_commqueue *node) {
    
    int i, rc, tag;
    
    assert(node->stage == MB_COMM_OLD_READY_FOR_PROP);
    assert(node->outcount != NULL);
    assert(node->incount  == NULL);
    assert(node->board != NULL);
    
    P_INFO("COMM: (Board %d) propagating expected buffer sizes", node->mb);
    
    /* allocate memory for incount */
    node->incount = (int*)malloc(MBI_CommSize * sizeof(int));
    assert(node->incount != NULL);
    if (node->incount == NULL) return MB_ERR_MEMALLOC;
    
    /* allocate memory for sendreq */
    node->sendreq = (MPI_Request *)malloc(MBI_CommSize * sizeof(MPI_Request));
    assert(node->sendreq != NULL);
    if (node->sendreq == NULL) return MB_ERR_MEMALLOC;
    
    /* allocate memory for recvreq */
    node->recvreq = (MPI_Request *)malloc(MBI_CommSize * sizeof(MPI_Request));
    assert(node->recvreq != NULL);
    if (node->recvreq == NULL) return MB_ERR_MEMALLOC;
    
    /* generate unique tag from this board */
    assert(node->mb <= MBI_TAG_BASE);
    tag = MBI_TAG_MSGDATA | node->mb;
    assert(tag < MBI_TAG_MAX);
    
    /* issue irecv from all remote procs */
    for (i = 0; i < MBI_CommSize; i++)
    {
        if (i == MBI_CommRank)
        {
            node->incount[i] = 0;
            node->recvreq[i] = MPI_REQUEST_NULL;
        }
        else
        {
            rc = MPI_Irecv(&(node->incount[i]), 1, MPI_INT, i, tag, 
                    MBI_CommWorld, &(node->recvreq[i])); 
            assert(rc == MPI_SUCCESS);
            if (rc != MPI_SUCCESS) return MB_ERR_MPI;
        }
    }

    
    /* issue issends to all remote procs */
    for (i = 0; i < MBI_CommSize; i++)
    {
        if (i == MBI_CommRank)
        {
            node->sendreq[i] = MPI_REQUEST_NULL;
        }
        else
        {
            /* send send send... */
            rc = MPI_Issend(&(node->outcount[i]), 1, MPI_INT, i, tag, 
                    MBI_CommWorld, &(node->sendreq[i])); 
            assert(rc == MPI_SUCCESS);
            if (rc != MPI_SUCCESS) return MB_ERR_MPI;
        }
    }
    
    node->pending_in  = 1;
    node->pending_out = 1;
    
    /* move on to next stage */
    P_INFO("COMM: (Board %d) moving to MB_COMM_OLD_BUFINFO_SENT stage", node->mb);
    node->stage = MB_COMM_OLD_BUFINFO_SENT;
    return MB_SUCCESS;
    
}
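
This stage exchanges a single integer (the expected buffer size) with every peer through matched MPI_Irecv/MPI_Issend pairs so that the exchange can overlap with the later stages. In a standalone program the same data movement can be written with one blocking collective; the sketch below uses MPI_Alltoall as that alternative, which is not what libmboard itself does.

#include <mpi.h>
#include <stdio.h>
#include <stdlib.h>

int main(int argc, char **argv)
{
    int rank, size, i;
    int *outcount, *incount;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    outcount = malloc(sizeof(int) * size);
    incount  = malloc(sizeof(int) * size);

    /* pretend rank r wants to send (r + 1) messages to every peer */
    for (i = 0; i < size; i++)
        outcount[i] = (i == rank) ? 0 : rank + 1;

    /* every rank sends outcount[i] to rank i and receives incount[i] from it */
    MPI_Alltoall(outcount, 1, MPI_INT, incount, 1, MPI_INT, MPI_COMM_WORLD);

    for (i = 0; i < size; i++)
        if (i != rank)
            printf("P%d expects %d messages from P%d\n", rank, incount[i], i);

    free(outcount);
    free(incount);
    MPI_Finalize();
    return 0;
}
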
Example #16
/*!
 * \brief Creates a Filter object based on a function
 * \ingroup MB_API
 * \param[out] fh_ptr Address of Filter handle
 * \param[in] filterFunc Pointer to user-defined filter function
 * 
 * The \c filterFunc pointer is placed in a newly allocated ::MBIt_filterfunc_wrapper
 * object and registered with the ::MBI_OM_filter map. The associated 
 * function handle is then returned through \c fh_ptr.
 * 
 * If the function returns with an error code, \c fh_ptr will be set to
 * ::MB_NULL_FILTER.
 * 
 * Possible return codes:
 *  - ::MB_SUCCESS 
 *  - ::MB_ERR_INVALID (\c filterFunc is \c NULL)
 *  - ::MB_ERR_MEMALLOC (unable to allocate required memory)
 *  - ::MB_ERR_OVERFLOW (ObjectMap overflow. Too many functions registered.)
 *  - ::MB_ERR_INTERNAL (Internal error. Possibly a bug.)
 */
int MB_Filter_Create(MBt_Filter *fh_ptr, 
        int (*filterFunc)(const void *msg, int pid) ) {
    
    OM_key_t rc_fh;
    MBIt_filterfunc_wrapper *fwrap;
    
    /* first, some quick checks */
    if (fh_ptr == NULL)
    {
    	P_FUNCFAIL("NULL pointer given in first argument");
    	return MB_ERR_INVALID;
    }
    
    /* set NULL return value first, in case of error conditions */
    *fh_ptr = MB_NULL_FILTER;
    
    /* sorry sir, me no deal with null function pointers */
    if (filterFunc == NULL)
    {
    	P_FUNCFAIL("NULL function pointer given in seconds argument");
    	return MB_ERR_INVALID;
    }
    
    /* allocate memory for your function ptr wrapper */
    fwrap = (MBIt_filterfunc_wrapper *)malloc(sizeof(MBIt_filterfunc_wrapper));
    assert(fwrap != NULL);
    if (fwrap == NULL)
    {
    	P_FUNCFAIL("Could not allocate required memory");
    	return MB_ERR_MEMALLOC;
    }
    
    /* embed func pointer into wrapper */
    fwrap->func = (MBIt_filterfunc)filterFunc;
    
    /* register wrapper object in objmap */
    assert(MBI_OM_filter != NULL);
    assert(MBI_OM_filter->type == OM_TYPE_FILTER);
    rc_fh = MBI_objmap_push(MBI_OM_filter, (void*)fwrap);
    if (rc_fh > OM_MAX_INDEX)
    {
        if (rc_fh == OM_ERR_MEMALLOC)
        {
        	P_FUNCFAIL("Could not allocate required memory");
            return MB_ERR_MEMALLOC;
        }
        else if (rc_fh == OM_ERR_OVERFLOW)
        {
        	P_FUNCFAIL("Too many filters created. ObjMap keys overflowed");
            return MB_ERR_OVERFLOW;
        }
        else
        {
        	P_FUNCFAIL("ObjectMap error. MBI_objmap_push() returned %d", (int)rc_fh);
            return MB_ERR_INTERNAL;
        }
    }
    
    /* debug: make sure same fh on all procs */
#ifdef _EXTRA_CHECKS
#ifdef _UNSAFE_CHECKS
    check_all_fh_equal((OM_key_t)rc_fh, (MBIt_filterfunc)filterFunc);
#endif /* _UNSAFE_CHECKS */
#endif /*_EXTRA_CHECKS*/
    
    /* assign fh */
    *fh_ptr = (MBt_Function)rc_fh;
    
    P_INFO("Filter function registered. Handle = %d", (int)rc_fh);
    
    /* victory is mine! */
    return MB_SUCCESS;
}
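
To round off, a hedged usage sketch for MB_Filter_Create(): the filter signature (const void *msg, int pid) comes from the function above, while my_sync_filter, the message struct and the follow-up MB_Filter_Assign() call are assumptions made for illustration; check mboard.h for the exact API.

#include <stdio.h>
#include "mboard.h"

struct my_msg { int id; double x; };

/* accept a message for MPI task 'pid' only if its id maps to that task */
static int my_sync_filter(const void *msg, int pid)
{
    return ((const struct my_msg *)msg)->id % 4 == pid % 4;
}

int register_filter(MBt_Board board)
{
    MBt_Filter fh;
    int rc;

    rc = MB_Filter_Create(&fh, &my_sync_filter);
    if (rc != MB_SUCCESS) {
        fprintf(stderr, "MB_Filter_Create failed (rc = %d)\n", rc);
        return rc;
    }

    /* attach the filter to a board so it is used during synchronisation
     * (assumed call; the name MB_Filter_Assign may differ in your version) */
    return MB_Filter_Assign(board, fh);
}
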