Example #1
CoreServer::Result CoreServer::test()
{
#ifdef INTEL
    if (m_info.coreId != 0)
    {
        // Slave core: post a ping request on the channel to the master core.
        FileSystemMessage msg;
        msg.type   = ChannelMessage::Request;
        msg.action = StatFile;
        msg.path   = (char *) 0x12345678;  // dummy marker value, only used by this test
        msg.size   = m_info.coreId;        // carry our core id to the master
        m_toMaster->write(&msg);
    }
    else
    {
        // Master core: wait for a ping message from each of the other cores.
        FileSystemMessage msg;
        Size numCores = m_cores->getCores().count();

        for (Size i = 1; i < numCores; i++)
        {
            MemoryChannel *ch = (MemoryChannel *) m_fromSlave->get(i);
            if (!ch)
                return IOError;

            // TODO: replace with ChannelClient::syncReceiveFrom
            while (ch->read(&msg) != Channel::Success);

            if (msg.action == StatFile)
            {
                NOTICE("core" << i << " sent a ping");
            }
        }

    }
#endif /* INTEL */
    return Success;
}
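The exchange above is a simple ping check: every slave core writes one message on its channel to the master, and the master polls each per-slave channel until that message arrives. The standalone sketch below models the same handshake with a plain single-slot mailbox standing in for MemoryChannel; it is only an illustration under those assumptions, not FreeNOS code.

// Standalone sketch (not FreeNOS code): a single-slot mailbox per slave models
// the slave -> master ping channels used by CoreServer::test().
#include <cstdio>
#include <vector>

struct Mailbox                   // stands in for one slave -> master MemoryChannel
{
    bool full = false;
    int  coreId = 0;

    bool write(int id) { if (full) return false; coreId = id; full = true; return true; }
    bool read(int &id) { if (!full) return false; id = coreId; full = false; return true; }
};

int main()
{
    const int numCores = 4;
    std::vector<Mailbox> fromSlave(numCores);

    // Slave branch: every core except core 0 posts a message carrying its id.
    for (int core = 1; core < numCores; core++)
        fromSlave[core].write(core);

    // Master branch: poll each per-slave channel until its ping arrives.
    for (int core = 1; core < numCores; core++)
    {
        int id;
        while (!fromSlave[core].read(id))   // busy-wait, like ch->read(&msg) above
            ;
        std::printf("core%d sent a ping\n", id);
    }
    return 0;
}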
Example #2
CoreServer::Result CoreServer::setupChannels()
{
#ifdef INTEL
    SystemInformation info;

    if (info.coreId == 0)
    {
        // Master core: create one producer/consumer channel pair per slave core.
        Size numCores = m_cores->getCores().count();

        m_toSlave    = new Index<MemoryChannel>(numCores);
        m_fromSlave  = new Index<MemoryChannel>(numCores);

        for (Size i = 1; i < numCores; i++)
        {
            MemoryChannel *ch = new MemoryChannel();
            CoreInfo *coreInfo = (CoreInfo *) m_coreInfo->get(i);

            // Master -> slave channel: data page at +(PAGESIZE * 2), feedback page at +(PAGESIZE * 3).
            ch->setMode(Channel::Producer);
            ch->setMessageSize(sizeof(FileSystemMessage));
            ch->setPhysical(coreInfo->coreChannelAddress + (PAGESIZE * 2),
                            coreInfo->coreChannelAddress + (PAGESIZE * 3));
            m_toSlave->insert(i, *ch);

            // Slave -> master channel: data page at +0, feedback page at +PAGESIZE.
            ch = new MemoryChannel();
            ch->setMode(Channel::Consumer);
            ch->setMessageSize(sizeof(FileSystemMessage));
            ch->setPhysical(coreInfo->coreChannelAddress,
                            coreInfo->coreChannelAddress + PAGESIZE);
            m_fromSlave->insert(i, *ch);
        }
    }
    else
    {
        // Slave core: attach to the same per-core pages from the other end.
        m_toMaster = new MemoryChannel();
        m_toMaster->setMode(Channel::Producer);
        m_toMaster->setMessageSize(sizeof(FileSystemMessage));
        m_toMaster->setPhysical(info.coreChannelAddress,
                                info.coreChannelAddress + PAGESIZE);

        m_fromMaster = new MemoryChannel();
        m_fromMaster->setMode(Channel::Consumer);
        m_fromMaster->setMessageSize(sizeof(FileSystemMessage));
        m_fromMaster->setPhysical(info.coreChannelAddress + (PAGESIZE * 2),
                                  info.coreChannelAddress + (PAGESIZE * 3));
    }
#endif /* INTEL */
    return Success;
}
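setupChannels() relies on a fixed four-page block per slave core at coreInfo->coreChannelAddress: the lower two pages carry slave-to-master traffic and the upper two carry master-to-slave traffic, each direction split into a data page and a feedback page (the same labels Example #5 prints). The standalone sketch below just spells out that layout; the 4 KiB PAGESIZE and the base address are assumptions.

// Standalone sketch (not FreeNOS code): the per-core page layout assumed by
// CoreServer::setupChannels(), with an assumed page size and base address.
#include <cstdio>

typedef unsigned long Address;

int main()
{
    const Address PAGESIZE = 4096;        // assumed
    const Address base     = 0x80000000;  // hypothetical coreChannelAddress

    // Pages 0-1: slave -> master (slave produces, master consumes).
    Address slaveToMasterData     = base;
    Address slaveToMasterFeedback = base + PAGESIZE;

    // Pages 2-3: master -> slave (master produces, slave consumes).
    Address masterToSlaveData     = base + PAGESIZE * 2;
    Address masterToSlaveFeedback = base + PAGESIZE * 3;

    std::printf("slave->master: data=%#lx feedback=%#lx\n",
                slaveToMasterData, slaveToMasterFeedback);
    std::printf("master->slave: data=%#lx feedback=%#lx\n",
                masterToSlaveData, masterToSlaveFeedback);
    return 0;
}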
Example #3
ChannelClient::Result ChannelClient::connect(ProcessID pid)
{
    Address prodAddr, consAddr;
    SystemInformation info;

    // Allocate consumer
    MemoryChannel *cons = new MemoryChannel;
    if (!cons)
    {
        ERROR("failed to allocate consumer MemoryChannel object");
        return OutOfMemory;
    }
    cons->setMessageSize(sizeof(FileSystemMessage));
    cons->setMode(Channel::Consumer);

    // Allocate producer
    MemoryChannel *prod = new MemoryChannel;
    if (!prod)
    {
        ERROR("failed to allocate producer MemoryChannel object");
        delete cons;
        return OutOfMemory;
    }
    prod->setMessageSize(sizeof(FileSystemMessage));
    prod->setMode(Channel::Producer);

    // Describe the 4-page shared memory mapping backing both MemoryChannels.
    ProcessShares::MemoryShare share;
    share.pid    = pid;
    share.coreId = info.coreId;
    share.tagId  = 0;
    share.range.size = PAGESIZE * 4;
    share.range.virt = 0;
    share.range.phys = 0;
    share.range.access = Memory::User | Memory::Readable | Memory::Writable | Memory::Uncached;

    // Create shared memory mapping
    Error r = VMShare(pid, API::Create, &share);
    switch (r)
    {
        case API::Success:
        {
            DEBUG("mapped new shared at phys=" << (void *) share.range.phys <<
                  " virt=" << (void *) share.range.virt);
            prodAddr = share.range.virt;
            consAddr = share.range.virt + (PAGESIZE * 2);
            break;
        }
        case API::AlreadyExists:
        {
            DEBUG("using already shared at phys=" << (void *) share.range.phys <<
                  " virt=" << (void *) share.range.virt);
            prodAddr = share.range.virt + (PAGESIZE * 2);
            consAddr = share.range.virt;
            break;
        }
        default:
        {
            ERROR("failed to create shared memory mapping for new channel");
            delete prod;
            delete cons;
            return IOError;
        }
    }

    // Setup producer memory address
    if (prod->setVirtual(prodAddr, prodAddr + PAGESIZE) != MemoryChannel::Success)
    {
        ERROR("failed to set producer virtual memory address");
        delete prod;
        delete cons;
        return IOError;
    }

    // Setup consumer memory address
    if (cons->setVirtual(consAddr, consAddr + PAGESIZE) != MemoryChannel::Success)
    {
        ERROR("failed to set consumer virtual memory address");
        delete prod;
        delete cons;
        return IOError;
    }

    // Register channels
    m_registry->registerConsumer(pid, cons);
    m_registry->registerProducer(pid, prod);
    return Success;
}
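The Create/AlreadyExists switch is what pairs the two endpoints: the share is four pages, the process that created it produces into the lower half and consumes from the upper half, and the process that finds it already existing does the opposite. The standalone check below spells that out; for readability it uses one address for both sides, even though each process sees its own virtual mapping of the same physical pages.

// Standalone sketch (not part of ChannelClient): the Create / AlreadyExists
// split of the 4-page share, with assumed PAGESIZE and base address.
#include <cassert>
#include <cstdio>

typedef unsigned long Address;

int main()
{
    const Address PAGESIZE = 4096;        // assumed
    const Address virt     = 0x30000000;  // hypothetical share.range.virt

    // Side that created the share (API::Success).
    Address creatorProd = virt;
    Address creatorCons = virt + PAGESIZE * 2;

    // Side that found it already existing (API::AlreadyExists).
    Address joinerProd  = virt + PAGESIZE * 2;
    Address joinerCons  = virt;

    // Each producer writes exactly where the peer consumes.
    assert(creatorProd == joinerCons);
    assert(joinerProd  == creatorCons);
    std::printf("channel halves line up\n");
    return 0;
}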
Example #4
void CoreServer::createProcess(FileSystemMessage *msg)
{
    char cmd[128];
    Memory::Range range;

    if (m_info.coreId == 0)
    {
        // Master core: forward the request to the channel of the core given in msg->size.
        MemoryChannel *ch = (MemoryChannel *) m_toSlave->get(msg->size);

        if (!ch)
        {
            ERROR("invalid coreId=" << msg->size);
            msg->result = EBADF;
            return;
        }

        // Translate the caller's buffer and path pointers to physical addresses,
        // since the target core does not share the caller's address space.
        // TODO: move this into libmpi?
        range.virt = (Address) msg->buffer;
        VMCtl(msg->from, LookupVirtual, &range);
        msg->buffer = (char *) range.phys;

        range.virt = (Address) msg->path;
        VMCtl(msg->from, LookupVirtual, &range);
        msg->path = (char *) range.phys;

        if (ch->write(msg) != Channel::Success)
        {
            ERROR("failed to write channel on core" << msg->size);
            msg->result = EBADF;
            return;
        }
        DEBUG("creating program at phys " << (void *) msg->buffer << " on core" << msg->size);

        ch = (MemoryChannel *) m_fromSlave->get(msg->size);
        if (!ch)
        {
            ERROR("cannot find read channel for core" << msg->size);
            msg->result = EBADF;
            return;
        }
        // TODO: replace with ChannelClient::syncReceiveFrom
        while (ch->read(msg) != Channel::Success);
        DEBUG("program created with result " << (int)msg->result << " at core" << msg->size);

        msg->result = ESUCCESS;
        //IPCMessage(msg->from, API::Send, msg, sizeof(*msg));
        ChannelClient::instance->syncSendTo(msg, msg->from);
    }
    else
    {
        // Slave core: copy the command string from the physical address provided by the master.
        VMCopy(SELF, API::ReadPhys, (Address) cmd, (Address) msg->path, sizeof(cmd));

        // Map the program's ELF image into our address space (msg->offset carries its size).
        range.phys   = (Address) msg->buffer;
        range.virt   = 0;
        range.access = Memory::Readable | Memory::User;
        range.size   = msg->offset;
        VMCtl(SELF, Map, &range);

        pid_t pid = spawn(range.virt, msg->offset, cmd);
        int status;

        // reply to master
        msg->result = ESUCCESS;
        while (m_toMaster->write(msg) != Channel::Success);

        // TODO: temporary make coreserver waitpid() to save polling time
        waitpid(pid, &status, 0);
    }
}
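The key step on the master side of createProcess() is rewriting msg->buffer and msg->path from the requester's virtual addresses to physical addresses before forwarding the message, because the target core can only reach the pages by their physical location. The standalone sketch below mimics that translation with a hypothetical page table standing in for VMCtl(LookupVirtual); the addresses and page size are made up.

// Standalone sketch (not FreeNOS code): virtual-to-physical translation of the
// message pointers, using a fake page table instead of VMCtl(LookupVirtual).
#include <cassert>
#include <map>

typedef unsigned long Address;

int main()
{
    const Address PAGESIZE = 4096;  // assumed

    // Hypothetical page table of the requesting process: virtual page -> physical page.
    std::map<Address, Address> pageTable = {
        { 0x10000000, 0x00400000 },   // page holding msg->buffer (ELF image)
        { 0x10002000, 0x00408000 },   // page holding msg->path (command string)
    };

    // Stand-in for VMCtl(msg->from, LookupVirtual, &range).
    auto lookupVirtual = [&](Address virt) {
        Address page = virt & ~(PAGESIZE - 1);
        return pageTable.at(page) + (virt - page);
    };

    // Master side: translate both pointers before writing them to the slave channel.
    Address bufferPhys = lookupVirtual(0x10000000);
    Address pathPhys   = lookupVirtual(0x10002010);  // page offset is preserved

    assert(bufferPhys == 0x00400000);
    assert(pathPhys   == 0x00408010);
    return 0;
}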
Example #5
int MPI_Init(int *argc, char ***argv)
{
    SystemInformation info;
    FileSystemMessage msg;
    struct stat st;
    char *programName = (*argv)[0];
    char programPath[64];
    u8 *programBuffer;
    int fd;
    Memory::Range memChannelBase;

    // If we are master (node 0):
    if (info.coreId == 0)
    {
        // Ask the CoreServer for the number of available cores (returned in msg.size).
        msg.type   = ChannelMessage::Request;
        msg.action = ReadFile;
        msg.from   = SELF;
        ChannelClient::instance->syncSendReceive(&msg, CORESRV_PID);

        // This is also the place to accept -n COUNT, --help and similar options
        // that influence how many MPI programs get launched.
        coreCount = msg.size;

        // Read our own ELF program to a buffer and pass it to CoreServer
        // for creating new programs on the remote core.
        if (strncmp(programName, "/bin/", 5) != 0)
            snprintf(programPath, sizeof(programPath), "/bin/%s", programName);
        else
            strlcpy(programPath, programName, sizeof(programPath));

        if (stat(programPath, &st) != 0)
        {
            printf("%s: failed to stat '%s': %s\n",
                    programName, programPath, strerror(errno));
            return MPI_ERR_BAD_FILE;
        }
        programBuffer = new u8[st.st_size];
        MemoryBlock::set(programBuffer, 0, st.st_size);

        // Read ELF program
        if ((fd = open(programPath, O_RDONLY)) == -1)
        {
            printf("%s: failed to open '%s': %s\n",
                    programName, programPath, strerror(errno));
            return MPI_ERR_BAD_FILE;
        }
        if (read(fd, programBuffer, st.st_size) != st.st_size)
        {
            printf("%s: failed to read '%s': %s\n",
                    programName, programPath, strerror(errno));
            return MPI_ERR_BAD_FILE;
        }
        if (close(fd) != 0)
        {
            printf("%s: failed to close '%s': %s\n",
                    programName, programPath, strerror(errno));
            return MPI_ERR_BAD_FILE;
        }

        // Allocate memory on the local processor for the whole UniChannel array
        // (N x N communication channels for MPI). The physical base address is later
        // passed to each spawned program via the -a/--addr argument.
        memChannelBase.size = (PAGESIZE * 2) * (msg.size * msg.size);
        memChannelBase.phys = 0;
        memChannelBase.virt = 0;
        memChannelBase.access = Memory::Readable | Memory::Writable | Memory::User;
        if (VMCtl(SELF, Map, &memChannelBase) != API::Success)
        {
            printf("%s: failed to allocate MemoryChannel\n",
                    programName);
            return MPI_ERR_NO_MEM;
        }
        printf("%s: MemoryChannel at physical address %x\n",
                programName, memChannelBase.phys);

        // Clear channel pages
        MemoryBlock::set((void *) memChannelBase.virt, 0, memChannelBase.size);

        // Now create the slave processes on the other cores via the CoreServer.
        for (Size i = 1; i < coreCount; i++)
        {
            // TODO: check for cmd buffer size...
            char *cmd = new char[512];
            snprintf(cmd, 512, "%s -a %x -c %d",
                     programPath, memChannelBase.phys, coreCount);

            for (int j = 1; j < *argc; j++)
            {
                strcat(cmd, " ");
                strcat(cmd, (*argv)[j]);
            }

            msg.type   = ChannelMessage::Request;
            msg.action = CreateFile;
            msg.size   = i;
            msg.buffer = (char *) programBuffer;
            msg.offset = st.st_size;
            msg.path   = cmd;
            ChannelClient::instance->syncSendReceive(&msg, CORESRV_PID);

            if (msg.result != ESUCCESS)
            {
                printf("%s: failed to create process on core%d\n",
                        programName, i);
                return MPI_ERR_SPAWN;
            }
        }
    }
    else
    {
        // Slave (node N): parse the -a/--addr and -c/--cores arguments passed by the
        // master and map the shared UniChannel array into our address space.
        for (int i = 1; i < (*argc); i++)
        {
            if (!strcmp((*argv)[i], "--addr") ||
                !strcmp((*argv)[i], "-a"))
            {
                if (i + 1 >= (*argc))
                    return MPI_ERR_ARG;

                String s = (*argv)[i+1];
                memChannelBase.phys = s.toLong(Number::Hex);
                i++;
            }
            else if (!strcmp((*argv)[i], "--cores") ||
                     !strcmp((*argv)[i], "-c"))
            {
                if (i + 1 >= (*argc))
                    return MPI_ERR_ARG;
                coreCount = atoi((*argv)[i+1]);
                i++;
            }
            // Unknown MPI argument. Pass the rest to the user program.
            else
            {
                (*argc) -= (i-1);
                (*argv)[i-1] = (*argv)[0];
                (*argv) += (i-1);
                break;
            }
        }
    }

    // Create MemoryChannels
    readChannel  = new Index<MemoryChannel>(coreCount);
    writeChannel = new Index<MemoryChannel>(coreCount);

    // Fill read channels
    for (Size i = 0; i < coreCount; i++)
    {
        MemoryChannel *ch = new MemoryChannel();
        ch->setMode(MemoryChannel::Consumer);
        ch->setMessageSize(sizeof(MPIMessage));
        ch->setPhysical(MEMBASE(info.coreId) + (PAGESIZE * 2 * i),
                        MEMBASE(info.coreId) + (PAGESIZE * 2 * i) + PAGESIZE);
        readChannel->insert(i, *ch);

        if (info.coreId == 0)
        {
            printf("%s: read: core%d: data=%x feedback=%x base%d=%x\n", (*argv)[0], i,
                   MEMBASE(info.coreId) + (PAGESIZE * 2 * i),
                   MEMBASE(info.coreId) + (PAGESIZE * 2 * i) + PAGESIZE,
                   i, MEMBASE(i));
        }
    }

    // Fill write channels
    for (Size i = 0; i < coreCount; i++)
    {
        MemoryChannel *ch = new MemoryChannel();
        ch->setMode(MemoryChannel::Producer);
        ch->setMessageSize(sizeof(MPIMessage));
        ch->setPhysical(MEMBASE(i) + (PAGESIZE * 2 * info.coreId),
                        MEMBASE(i) + (PAGESIZE * 2 * info.coreId) + PAGESIZE);
        writeChannel->insert(i, *ch);

        if (info.coreId == 0)
        {
            printf("%s: write: core%d: data=%x feedback=%x base%d=%x\n", (*argv)[0], i,
                   MEMBASE(i) + (PAGESIZE * 2 * info.coreId),
                   MEMBASE(i) + (PAGESIZE * 2 * info.coreId) + PAGESIZE,
                   i, MEMBASE(i));
        }
    }

    return MPI_SUCCESS;
}
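MPI_Init wires up an N x N matrix of one-way channels: core c consumes from slot i inside its own region (written by core i) and produces into slot c inside core i's region, so every producer page lines up with exactly one consumer page. MEMBASE() is not shown in this excerpt; the standalone sketch below assumes it expands to the shared base plus c * (PAGESIZE * 2 * coreCount), which matches the (PAGESIZE * 2) * coreCount^2 bytes allocated above, and uses placeholder values for the base address and page size.

// Standalone sketch (not part of libmpi): checks the N x N channel layout used
// above. ASSUMPTION: MEMBASE(c) = base + c * (PAGESIZE * 2 * coreCount).
#include <cassert>
#include <cstdio>

typedef unsigned long Address;

int main()
{
    const Address PAGESIZE  = 4096;        // assumed page size
    const Address base      = 0x40000000;  // hypothetical memChannelBase.phys
    const int     coreCount = 4;

    // Per-core receive region, mirroring the assumed MEMBASE() macro.
    auto MEMBASE = [&](int c) { return base + c * (PAGESIZE * 2 * coreCount); };

    // Core c reads slot i inside its own region; core c writes slot c inside core i's region.
    auto readData  = [&](int c, int i) { return MEMBASE(c) + PAGESIZE * 2 * i; };
    auto writeData = [&](int c, int i) { return MEMBASE(i) + PAGESIZE * 2 * c; };

    // Every producer data page must coincide with the matching consumer data page.
    for (int a = 0; a < coreCount; a++)
        for (int b = 0; b < coreCount; b++)
            assert(writeData(a, b) == readData(b, a));

    std::printf("core0 -> core1 data page: %#lx\n", writeData(0, 1));
    return 0;
}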